diff --git a/NEXT_CHANGELOG.md b/NEXT_CHANGELOG.md index 5bec8e8e02..d7dff5f3dd 100644 --- a/NEXT_CHANGELOG.md +++ b/NEXT_CHANGELOG.md @@ -8,6 +8,7 @@ * Add `databricks_users` data source ([#4028](https://github.com/databricks/terraform-provider-databricks/pull/4028)) * Improve `databricks_service_principals` data source ([#5164](https://github.com/databricks/terraform-provider-databricks/pull/5164)) +* Added `databricks_permission` resource for managing permissions on Databricks objects for individual principals ([#5186](https://github.com/databricks/terraform-provider-databricks/pull/5186)). ### Bug Fixes diff --git a/docs/resources/permission.md b/docs/resources/permission.md new file mode 100644 index 0000000000..27c4a1fb69 --- /dev/null +++ b/docs/resources/permission.md @@ -0,0 +1,294 @@ +--- +subcategory: "Security" +--- + +# databricks_permission Resource + +This resource allows you to manage permissions for a single principal on a Databricks workspace object. Unlike `databricks_permissions`, which manages all principals' permissions for an object at once, this resource is authoritative for a specific object-principal pair only. + +-> This resource can only be used with a workspace-level provider! + +~> This resource is _authoritative_ for the specified object-principal pair. Configuring this resource will manage the permission for the specified principal only, without affecting permissions for other principals. + +~> **Warning:** Do not use both `databricks_permission` and `databricks_permissions` resources for the same object. This will cause conflicts as both resources manage the same permissions. + +-> Use `databricks_permissions` when you need to manage all permissions for an object in a single resource. Use `databricks_permission` (singular) when you want to manage permissions for individual principals independently. 
+ +## Example Usage + +### Cluster Permissions + +```hcl +resource "databricks_cluster" "shared" { + cluster_name = "Shared Analytics" + spark_version = data.databricks_spark_version.latest.id + node_type_id = data.databricks_node_type.smallest.id + autotermination_minutes = 20 + + autoscale { + min_workers = 1 + max_workers = 10 + } +} + +resource "databricks_group" "data_engineers" { + display_name = "Data Engineers" +} + +# Grant CAN_RESTART permission to a group +resource "databricks_permission" "cluster_de" { + object_type = "clusters" + object_id = databricks_cluster.shared.id + group_name = databricks_group.data_engineers.display_name + permission_level = "CAN_RESTART" +} + +# Grant CAN_ATTACH_TO permission to a user +resource "databricks_permission" "cluster_analyst" { + object_type = "clusters" + object_id = databricks_cluster.shared.id + user_name = "analyst@company.com" + permission_level = "CAN_ATTACH_TO" +} +``` + +### Job Permissions + +```hcl +resource "databricks_job" "etl" { + name = "ETL Pipeline" + + task { + task_key = "process_data" + + notebook_task { + notebook_path = "/Shared/ETL" + } + + new_cluster { + num_workers = 2 + spark_version = data.databricks_spark_version.latest.id + node_type_id = data.databricks_node_type.smallest.id + } + } +} + +# Grant CAN_MANAGE to a service principal +resource "databricks_permission" "job_sp" { + object_type = "jobs" + object_id = databricks_job.etl.id + service_principal_name = databricks_service_principal.automation.application_id + permission_level = "CAN_MANAGE" +} + +# Grant CAN_VIEW to a group +resource "databricks_permission" "job_viewers" { + object_type = "jobs" + object_id = databricks_job.etl.id + group_name = "Data Viewers" + permission_level = "CAN_VIEW" +} +``` + +### Notebook Permissions + +```hcl +resource "databricks_notebook" "analysis" { + path = "/Shared/Analysis" + language = "PYTHON" + content_base64 = base64encode(<<-EOT + # Analysis Notebook + print("Hello, World!") + EOT + ) +} + 
+# Grant CAN_RUN to a user +resource "databricks_permission" "notebook_user" { + object_type = "notebooks" + object_id = databricks_notebook.analysis.path + user_name = "data.scientist@company.com" + permission_level = "CAN_RUN" +} + +# Grant CAN_EDIT to a group +resource "databricks_permission" "notebook_editors" { + object_type = "notebooks" + object_id = databricks_notebook.analysis.path + group_name = "Notebook Editors" + permission_level = "CAN_EDIT" +} +``` + +### Token Permissions + +This resource solves the limitation where all token permissions must be defined in a single `databricks_permissions` resource: + +```hcl +# Multiple resources can now manage different principals independently +resource "databricks_permission" "tokens_team_a" { + object_type = "authorization" + object_id = "tokens" + group_name = "Team A" + permission_level = "CAN_USE" +} + +resource "databricks_permission" "tokens_team_b" { + object_type = "authorization" + object_id = "tokens" + group_name = "Team B" + permission_level = "CAN_USE" +} + +resource "databricks_permission" "tokens_service_account" { + object_type = "authorization" + object_id = "tokens" + service_principal_name = databricks_service_principal.ci_cd.application_id + permission_level = "CAN_USE" +} +``` + +### SQL Endpoint Permissions + +```hcl +resource "databricks_sql_endpoint" "analytics" { + name = "Analytics Warehouse" + cluster_size = "Small" + max_num_clusters = 1 +} + +resource "databricks_permission" "warehouse_users" { + object_type = "sql/warehouses" + object_id = databricks_sql_endpoint.analytics.id + group_name = "SQL Users" + permission_level = "CAN_USE" +} +``` + +## Argument Reference + +The following arguments are required: + +* `object_type` - (Required) The type of object to manage permissions for. 
Valid values include: + * `clusters` - For cluster permissions + * `cluster-policies` - For cluster policy permissions + * `instance-pools` - For instance pool permissions + * `jobs` - For job permissions + * `pipelines` - For Delta Live Tables pipeline permissions + * `notebooks` - For notebook permissions (use path as `object_id`) + * `directories` - For directory permissions (use path as `object_id`) + * `workspace-files` - For workspace file permissions (use path as `object_id`) + * `registered-models` - For registered model permissions + * `experiments` - For experiment permissions + * `sql-dashboards` - For legacy SQL dashboard permissions + * `sql/warehouses` - For SQL warehouse permissions + * `queries` - For query permissions + * `alerts` - For alert permissions + * `dashboards` - For Lakeview dashboard permissions + * `repos` - For repo permissions + * `authorization` - For authorization permissions (use `tokens` or `passwords` as `object_id`) + * `serving-endpoints` - For model serving endpoint permissions + * `vector-search-endpoints` - For vector search endpoint permissions + +* `object_id` - (Required) The ID or path of the object. For notebooks, directories, and workspace files, use the path (e.g., `/Shared/notebook`). For authorization, use `tokens` or `passwords`. For other objects, use the resource ID. + +* `permission_level` - (Required) The permission level to grant. The available permission levels depend on the object type. Common values include `CAN_MANAGE`, `CAN_USE`, `CAN_VIEW`, `CAN_RUN`, `CAN_EDIT`, `CAN_READ`, `CAN_RESTART`, `CAN_ATTACH_TO`. + +Exactly one of the following principal identifiers must be specified: + +* `user_name` - (Optional) User email address to grant permissions to. Conflicts with `group_name` and `service_principal_name`. +* `group_name` - (Optional) Group name to grant permissions to. Conflicts with `user_name` and `service_principal_name`. 
+* `service_principal_name` - (Optional) Application ID of the service principal. Conflicts with `user_name` and `group_name`. + +## Attribute Reference + +In addition to all arguments above, the following attributes are exported: + +* `id` - The ID of the permission in the format `///`. +* `object_type` - The type of object (e.g., `clusters`, `jobs`, `notebooks`). + +## Import + +Permissions can be imported using the format `///`. For example: + +```bash +terraform import databricks_permission.cluster_analyst /clusters/0123-456789-abc12345/analyst@company.com +``` + +## Comparison with databricks_permissions + +### When to use `databricks_permission` (singular) + +* You want to manage permissions for individual principals independently +* Different teams manage permissions for different principals on the same object +* You need to avoid the "all-or-nothing" approach of `databricks_permissions` +* You want to add/remove principals without affecting others +* Special cases like token permissions where multiple independent configurations are needed + +### When to use `databricks_permissions` (plural) + +* You want to manage all permissions for an object in one place +* You have a small, stable set of principals for an object +* You want to ensure no unexpected permissions exist on the object +* You prefer a single source of truth for all permissions on an object + +### Example Comparison + +**Using `databricks_permissions` (manages ALL principals)**: + +```hcl +resource "databricks_permissions" "cluster_all" { + cluster_id = databricks_cluster.shared.id + + access_control { + group_name = "Data Engineers" + permission_level = "CAN_RESTART" + } + + access_control { + user_name = "analyst@company.com" + permission_level = "CAN_ATTACH_TO" + } + + # Adding a third principal requires modifying this resource +} +``` + +**Using `databricks_permission` (manages ONE principal per resource)**: + +```hcl +resource "databricks_permission" "cluster_de" { + object_type = 
"clusters" + object_id = databricks_cluster.shared.id + group_name = "Data Engineers" + permission_level = "CAN_RESTART" +} + +resource "databricks_permission" "cluster_analyst" { + object_type = "clusters" + object_id = databricks_cluster.shared.id + user_name = "analyst@company.com" + permission_level = "CAN_ATTACH_TO" +} + +# Adding a third principal is a separate resource +# No need to modify existing resources +resource "databricks_permission" "cluster_viewer" { + object_type = "clusters" + object_id = databricks_cluster.shared.id + group_name = "Viewers" + permission_level = "CAN_ATTACH_TO" +} +``` + +## Related Resources + +The following resources are used in the same context: + +* [databricks_permissions](permissions.md) - Manage all permissions for an object at once +* [databricks_cluster](cluster.md) - Create Databricks clusters +* [databricks_job](job.md) - Create Databricks jobs +* [databricks_notebook](notebook.md) - Manage Databricks notebooks +* [databricks_group](group.md) - Manage Databricks groups +* [databricks_user](user.md) - Manage Databricks users +* [databricks_service_principal](service_principal.md) - Manage Databricks service principals diff --git a/internal/providers/pluginfw/pluginfw_rollout_utils.go b/internal/providers/pluginfw/pluginfw_rollout_utils.go index b5bfa28245..10e2bf9b00 100644 --- a/internal/providers/pluginfw/pluginfw_rollout_utils.go +++ b/internal/providers/pluginfw/pluginfw_rollout_utils.go @@ -19,6 +19,7 @@ import ( "github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw/products/dashboards" "github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw/products/library" "github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw/products/notificationdestinations" + "github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw/products/permissions" 
"github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw/products/qualitymonitor" "github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw/products/registered_model" "github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw/products/serving" @@ -50,6 +51,7 @@ var migratedDataSources = []func() datasource.DataSource{ var pluginFwOnlyResources = append( []func() resource.Resource{ app.ResourceApp, + permissions.ResourcePermission, }, autoGeneratedResources..., ) diff --git a/internal/providers/pluginfw/products/permissions/object_mutex.go b/internal/providers/pluginfw/products/permissions/object_mutex.go new file mode 100644 index 0000000000..cd2a6a89e7 --- /dev/null +++ b/internal/providers/pluginfw/products/permissions/object_mutex.go @@ -0,0 +1,58 @@ +package permissions + +import ( + "fmt" + "sync" +) + +// objectMutexManager manages mutexes per object to prevent concurrent +// operations on the same Databricks object that could lead to race conditions. +// +// This is particularly important for Delete operations where multiple +// databricks_permission resources for the same object might be deleted +// concurrently, each doing GET -> filter -> SET, which could result in +// lost permission updates. +type objectMutexManager struct { + mutexes sync.Map // map[string]*sync.Mutex +} + +// globalObjectMutexManager is the singleton instance used by all permission resources +var globalObjectMutexManager = &objectMutexManager{} + +// Lock acquires a mutex for the given object type and ID. +// Each object gets its own mutex to allow concurrent operations on different objects +// while serializing operations on the same object. 
+func (m *objectMutexManager) Lock(objectType, objectID string) { + key := fmt.Sprintf("%s/%s", objectType, objectID) + + // LoadOrStore returns the existing value if present, otherwise stores and returns the given value + value, _ := m.mutexes.LoadOrStore(key, &sync.Mutex{}) + mu := value.(*sync.Mutex) + + // Lock the object-specific mutex + mu.Lock() +} + +// Unlock releases the mutex for the given object type and ID. +func (m *objectMutexManager) Unlock(objectType, objectID string) { + key := fmt.Sprintf("%s/%s", objectType, objectID) + + value, ok := m.mutexes.Load(key) + if ok { + mu := value.(*sync.Mutex) + mu.Unlock() + } +} + +// lockObject acquires a lock for the given object type and ID. +// This should be called at the start of any operation that modifies permissions. +func lockObject(objectType, objectID string) { + globalObjectMutexManager.Lock(objectType, objectID) +} + +// unlockObject releases the lock for the given object type and ID. +// This should be called at the end of any operation that modifies permissions. +// Use defer to ensure it's always called even if the operation panics. 
+func unlockObject(objectType, objectID string) { + globalObjectMutexManager.Unlock(objectType, objectID) +} diff --git a/internal/providers/pluginfw/products/permissions/object_mutex_test.go b/internal/providers/pluginfw/products/permissions/object_mutex_test.go new file mode 100644 index 0000000000..36d0400ce4 --- /dev/null +++ b/internal/providers/pluginfw/products/permissions/object_mutex_test.go @@ -0,0 +1,145 @@ +package permissions + +import ( + "sync" + "sync/atomic" + "testing" + "time" + + "github.com/stretchr/testify/assert" +) + +func TestObjectMutexManager_LockUnlock(t *testing.T) { + manager := &objectMutexManager{} + + objectType := "clusters" + objectID := "test-cluster" + + // First lock should succeed immediately + manager.Lock(objectType, objectID) + + // Verify mutex was created by checking we can load it + key := "clusters/test-cluster" + _, exists := manager.mutexes.Load(key) + assert.True(t, exists, "Mutex should be created for object") + + // Unlock + manager.Unlock(objectType, objectID) +} + +func TestObjectMutexManager_ConcurrentAccess(t *testing.T) { + manager := &objectMutexManager{} + + objectType := "clusters" + objectID := "test-cluster" + var counter int32 + var wg sync.WaitGroup + + // Simulate 10 concurrent operations on the same object + // The mutex should ensure they execute serially + for i := 0; i < 10; i++ { + wg.Add(1) + go func() { + defer wg.Done() + + manager.Lock(objectType, objectID) + defer manager.Unlock(objectType, objectID) + + // Simulate some work with a counter + // Without proper locking, this would cause race conditions + current := atomic.LoadInt32(&counter) + time.Sleep(time.Millisecond) // Simulate some processing + atomic.StoreInt32(&counter, current+1) + }() + } + + wg.Wait() + + // All 10 operations should have completed + assert.Equal(t, int32(10), atomic.LoadInt32(&counter), "All operations should complete") +} + +func TestObjectMutexManager_DifferentObjects(t *testing.T) { + manager := 
&objectMutexManager{} + + objectType1 := "clusters" + objectID1 := "cluster-1" + objectType2 := "clusters" + objectID2 := "cluster-2" + + var wg sync.WaitGroup + results := make([]string, 0, 2) + var resultsMutex sync.Mutex + + // Operations on different objects should run concurrently + wg.Add(2) + + go func() { + defer wg.Done() + manager.Lock(objectType1, objectID1) + defer manager.Unlock(objectType1, objectID1) + + time.Sleep(50 * time.Millisecond) + resultsMutex.Lock() + results = append(results, "object1") + resultsMutex.Unlock() + }() + + go func() { + defer wg.Done() + manager.Lock(objectType2, objectID2) + defer manager.Unlock(objectType2, objectID2) + + time.Sleep(50 * time.Millisecond) + resultsMutex.Lock() + results = append(results, "object2") + resultsMutex.Unlock() + }() + + wg.Wait() + + // Both operations should complete (order doesn't matter) + assert.Len(t, results, 2, "Both operations should complete") +} + +func TestObjectMutexManager_DifferentObjectTypes(t *testing.T) { + manager := &objectMutexManager{} + + objectType1 := "clusters" + objectType2 := "jobs" + objectID := "same-id" // Same ID but different types + + var wg sync.WaitGroup + results := make([]string, 0, 2) + var resultsMutex sync.Mutex + + // Operations on different object types should run concurrently even with same ID + wg.Add(2) + + go func() { + defer wg.Done() + manager.Lock(objectType1, objectID) + defer manager.Unlock(objectType1, objectID) + + time.Sleep(50 * time.Millisecond) + resultsMutex.Lock() + results = append(results, "clusters") + resultsMutex.Unlock() + }() + + go func() { + defer wg.Done() + manager.Lock(objectType2, objectID) + defer manager.Unlock(objectType2, objectID) + + time.Sleep(50 * time.Millisecond) + resultsMutex.Lock() + results = append(results, "jobs") + resultsMutex.Unlock() + }() + + wg.Wait() + + // Both operations should complete (order doesn't matter) + assert.Len(t, results, 2, "Both operations on different object types should complete 
concurrently") +} diff --git a/internal/providers/pluginfw/products/permissions/permission_entity.go b/internal/providers/pluginfw/products/permissions/permission_entity.go new file mode 100644 index 0000000000..4fa0b2efde --- /dev/null +++ b/internal/providers/pluginfw/products/permissions/permission_entity.go @@ -0,0 +1,59 @@ +package permissions + +import "github.com/databricks/databricks-sdk-go/service/iam" + +// PermissionEntity is the entity used for singular databricks_permission resource +// It represents permissions for a single principal on a single object +// Note: Currently not used, the resource uses PermissionResourceModel directly +type PermissionEntity struct { + // Object type - computed + ObjectType string `json:"object_type,omitempty" tf:"computed"` + + // Principal identifiers - exactly one required + UserName string `json:"user_name,omitempty"` + GroupName string `json:"group_name,omitempty"` + ServicePrincipalName string `json:"service_principal_name,omitempty"` + + // Permission level for this principal + PermissionLevel iam.PermissionLevel `json:"permission_level"` +} + +// GetPrincipalName returns the principal identifier from the entity +func (p PermissionEntity) GetPrincipalName() string { + if p.UserName != "" { + return p.UserName + } + if p.GroupName != "" { + return p.GroupName + } + if p.ServicePrincipalName != "" { + return p.ServicePrincipalName + } + return "" +} + +// ToAccessControlRequest converts PermissionEntity to AccessControlRequest for API calls +func (p PermissionEntity) ToAccessControlRequest() iam.AccessControlRequest { + return iam.AccessControlRequest{ + UserName: p.UserName, + GroupName: p.GroupName, + ServicePrincipalName: p.ServicePrincipalName, + PermissionLevel: p.PermissionLevel, + } +} + +// FromAccessControlResponse creates a PermissionEntity from an AccessControlResponse +func FromAccessControlResponse(acr iam.AccessControlResponse) PermissionEntity { + // Get the highest permission level from AllPermissions 
+ var permissionLevel iam.PermissionLevel + if len(acr.AllPermissions) > 0 { + permissionLevel = acr.AllPermissions[0].PermissionLevel + } + + return PermissionEntity{ + UserName: acr.UserName, + GroupName: acr.GroupName, + ServicePrincipalName: acr.ServicePrincipalName, + PermissionLevel: permissionLevel, + } +} diff --git a/internal/providers/pluginfw/products/permissions/permission_level_validator.go b/internal/providers/pluginfw/products/permissions/permission_level_validator.go new file mode 100644 index 0000000000..e44e3845aa --- /dev/null +++ b/internal/providers/pluginfw/products/permissions/permission_level_validator.go @@ -0,0 +1,93 @@ +package permissions + +import ( + "context" + "fmt" + + "github.com/databricks/terraform-provider-databricks/permissions" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +// permissionLevelValidator validates that the permission_level is valid for the configured object type. +// It uses a hybrid approach: +// 1. If the object type is known in permission_definitions.go, validate against allowed levels +// 2. 
If the object type is unknown (new type), skip validation and let the API handle it +type permissionLevelValidator struct{} + +func (v permissionLevelValidator) Description(ctx context.Context) string { + return "validates that the permission level is valid for the configured object type when the object type is known" +} + +func (v permissionLevelValidator) MarkdownDescription(ctx context.Context) string { + return "validates that the permission level is valid for the configured object type when the object type is known" +} + +func (v permissionLevelValidator) ValidateString(ctx context.Context, req validator.StringRequest, resp *validator.StringResponse) { + if req.ConfigValue.IsNull() || req.ConfigValue.IsUnknown() { + return + } + + permissionLevel := req.ConfigValue.ValueString() + + // Get the object_type from the configuration + var objectType types.String + diags := req.Config.GetAttribute(ctx, path.Root("object_type"), &objectType) + if diags.HasError() || objectType.IsNull() || objectType.IsUnknown() { + // Can't validate without object_type, let the API handle it + return + } + + objectTypeValue := objectType.ValueString() + + // Try to find the permission mapping for this object type + allPermissions := permissions.AllResourcePermissions() + var mapping permissions.WorkspaceObjectPermissions + var found bool + + for _, m := range allPermissions { + if m.GetRequestObjectType() == objectTypeValue { + mapping = m + found = true + break + } + } + + if !found { + // Object type not found in our definitions - this might be a new object type + // Let the API handle validation + return + } + + // Get allowed permission levels for this object type + allowedLevels := mapping.GetAllowedPermissionLevels(true) // true = include non-management permissions + + // Check if the configured permission level is allowed + isValid := false + for _, allowedLevel := range allowedLevels { + if allowedLevel == permissionLevel { + isValid = true + break + } + } + + if !isValid { + 
resp.Diagnostics.AddAttributeError( + req.Path, + "Invalid Permission Level", + fmt.Sprintf( + "Permission level %q is not valid for object type %q. Allowed levels: %v", + permissionLevel, + objectTypeValue, + allowedLevels, + ), + ) + } +} + +// ValidatePermissionLevel returns a validator that checks if the permission level is valid for the object type. +// Uses a hybrid approach: validates against known object types, lets API handle unknown ones. +func ValidatePermissionLevel() validator.String { + return permissionLevelValidator{} +} diff --git a/internal/providers/pluginfw/products/permissions/permission_level_validator_test.go b/internal/providers/pluginfw/products/permissions/permission_level_validator_test.go new file mode 100644 index 0000000000..0c74a9871a --- /dev/null +++ b/internal/providers/pluginfw/products/permissions/permission_level_validator_test.go @@ -0,0 +1,190 @@ +package permissions + +import ( + "context" + "testing" + + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-go/tftypes" + "github.com/stretchr/testify/assert" +) + +func TestPermissionLevelValidator_KnownObjectType(t *testing.T) { + v := permissionLevelValidator{} + + tests := []struct { + name string + objectType string + permissionLevel string + expectError bool + errorContains string + }{ + { + name: "valid cluster permission", + objectType: "clusters", + permissionLevel: "CAN_ATTACH_TO", + expectError: false, + }, + { + name: "valid cluster permission - CAN_RESTART", + objectType: "clusters", + permissionLevel: "CAN_RESTART", + expectError: false, + }, + { + name: "invalid cluster permission", + objectType: "clusters", + permissionLevel: "INVALID_PERMISSION", + expectError: true, + 
errorContains: "not valid for object type", + }, + { + name: "valid job permission", + objectType: "jobs", + permissionLevel: "CAN_VIEW", + expectError: false, + }, + { + name: "valid authorization permission", + objectType: "authorization", + permissionLevel: "CAN_USE", + expectError: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Create a test config + configValues := map[string]tftypes.Value{ + "object_type": tftypes.NewValue(tftypes.String, tt.objectType), + "permission_level": tftypes.NewValue(tftypes.String, tt.permissionLevel), + } + + config := tfsdk.Config{ + Schema: schema.Schema{ + Attributes: map[string]schema.Attribute{ + "object_type": schema.StringAttribute{ + Required: true, + }, + "permission_level": schema.StringAttribute{ + Required: true, + }, + }, + }, + Raw: tftypes.NewValue(tftypes.Object{ + AttributeTypes: map[string]tftypes.Type{ + "object_type": tftypes.String, + "permission_level": tftypes.String, + }, + }, configValues), + } + + req := validator.StringRequest{ + Path: path.Root("permission_level"), + ConfigValue: types.StringValue(tt.permissionLevel), + Config: config, + } + + resp := &validator.StringResponse{} + + v.ValidateString(context.Background(), req, resp) + + if tt.expectError { + assert.True(t, resp.Diagnostics.HasError(), "Expected error but got none") + if tt.errorContains != "" { + assert.Contains(t, resp.Diagnostics.Errors()[0].Detail(), tt.errorContains) + } + } else { + assert.False(t, resp.Diagnostics.HasError(), "Expected no error but got: %v", resp.Diagnostics.Errors()) + } + }) + } +} + +func TestPermissionLevelValidator_UnknownObjectType(t *testing.T) { + v := permissionLevelValidator{} + + // Test with an unknown object type - should not error (let API handle it) + configValues := map[string]tftypes.Value{ + "object_type": tftypes.NewValue(tftypes.String, "new-unknown-type"), + "permission_level": tftypes.NewValue(tftypes.String, "SOME_PERMISSION"), + } + + config := 
tfsdk.Config{ + Schema: schema.Schema{ + Attributes: map[string]schema.Attribute{ + "object_type": schema.StringAttribute{ + Required: true, + }, + "permission_level": schema.StringAttribute{ + Required: true, + }, + }, + }, + Raw: tftypes.NewValue(tftypes.Object{ + AttributeTypes: map[string]tftypes.Type{ + "object_type": tftypes.String, + "permission_level": tftypes.String, + }, + }, configValues), + } + + req := validator.StringRequest{ + Path: path.Root("permission_level"), + ConfigValue: types.StringValue("SOME_PERMISSION"), + Config: config, + } + + resp := &validator.StringResponse{} + + v.ValidateString(context.Background(), req, resp) + + // Should NOT error - let API handle validation for unknown types + assert.False(t, resp.Diagnostics.HasError(), "Should not error for unknown object type") +} + +func TestPermissionLevelValidator_MissingObjectType(t *testing.T) { + v := permissionLevelValidator{} + + // Test with null object_type - should not error (let API handle it) + configValues := map[string]tftypes.Value{ + "object_type": tftypes.NewValue(tftypes.String, nil), + "permission_level": tftypes.NewValue(tftypes.String, "CAN_USE"), + } + + config := tfsdk.Config{ + Schema: schema.Schema{ + Attributes: map[string]schema.Attribute{ + "object_type": schema.StringAttribute{ + Required: true, + }, + "permission_level": schema.StringAttribute{ + Required: true, + }, + }, + }, + Raw: tftypes.NewValue(tftypes.Object{ + AttributeTypes: map[string]tftypes.Type{ + "object_type": tftypes.String, + "permission_level": tftypes.String, + }, + }, configValues), + } + + req := validator.StringRequest{ + Path: path.Root("permission_level"), + ConfigValue: types.StringValue("CAN_USE"), + Config: config, + } + + resp := &validator.StringResponse{} + + v.ValidateString(context.Background(), req, resp) + + // Should NOT error - let API handle validation when object_type is missing + assert.False(t, resp.Diagnostics.HasError(), "Should not error when object_type is null") +} 
diff --git a/internal/providers/pluginfw/products/permissions/resource_permission.go b/internal/providers/pluginfw/products/permissions/resource_permission.go new file mode 100644 index 0000000000..158d63e6fe --- /dev/null +++ b/internal/providers/pluginfw/products/permissions/resource_permission.go @@ -0,0 +1,461 @@ +package permissions + +import ( + "context" + "fmt" + "strings" + + "github.com/databricks/databricks-sdk-go/apierr" + "github.com/databricks/databricks-sdk-go/service/iam" + "github.com/databricks/terraform-provider-databricks/common" + pluginfwcommon "github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw/common" + "github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +const resourceName = "permission" + +var _ resource.ResourceWithConfigure = &PermissionResource{} +var _ resource.ResourceWithImportState = &PermissionResource{} + +func ResourcePermission() resource.Resource { + return &PermissionResource{} +} + +type PermissionResource struct { + Client *common.DatabricksClient +} + +// PermissionResourceModel represents the Terraform resource model +type PermissionResourceModel struct { + ID types.String `tfsdk:"id"` + ObjectType types.String `tfsdk:"object_type"` + ObjectID types.String `tfsdk:"object_id"` + + // Principal identifiers - exactly one required + UserName types.String `tfsdk:"user_name"` + GroupName types.String `tfsdk:"group_name"` + ServicePrincipalName 
types.String `tfsdk:"service_principal_name"` + + // Permission level + PermissionLevel types.String `tfsdk:"permission_level"` +} + +func (r *PermissionResource) Metadata(ctx context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) { + resp.TypeName = pluginfwcommon.GetDatabricksProductionName(resourceName) +} + +func (r *PermissionResource) Schema(ctx context.Context, req resource.SchemaRequest, resp *resource.SchemaResponse) { + resp.Schema = schema.Schema{ + Description: "Manages permissions for a single principal on a Databricks object. " + + "This resource is authoritative for the specified object-principal pair only. " + + "Use `databricks_permissions` for managing all principals on an object at once.", + Attributes: map[string]schema.Attribute{ + "id": schema.StringAttribute{ + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + }, + "object_type": schema.StringAttribute{ + Required: true, + Description: "The type of object to manage permissions for (e.g., 'clusters', 'jobs', 'notebooks', 'authorization'). See the Databricks Permissions API documentation for the full list of supported object types.", + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + }, + "object_id": schema.StringAttribute{ + Required: true, + Description: "The ID of the object to manage permissions for. The format depends on the object type.", + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + }, + // Principal identifiers - exactly one required, mutually exclusive + "user_name": schema.StringAttribute{ + Optional: true, + Description: "User name of the principal. 
Conflicts with group_name and service_principal_name.", + Validators: []validator.String{ + stringvalidator.ConflictsWith( + path.MatchRoot("group_name"), + path.MatchRoot("service_principal_name"), + ), + }, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + }, + "group_name": schema.StringAttribute{ + Optional: true, + Description: "Group name of the principal. Conflicts with user_name and service_principal_name.", + Validators: []validator.String{ + stringvalidator.ConflictsWith( + path.MatchRoot("user_name"), + path.MatchRoot("service_principal_name"), + ), + }, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + }, + "service_principal_name": schema.StringAttribute{ + Optional: true, + Description: "Service principal name. Conflicts with user_name and group_name.", + Validators: []validator.String{ + stringvalidator.ConflictsWith( + path.MatchRoot("user_name"), + path.MatchRoot("group_name"), + ), + }, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + }, + // Permission level + "permission_level": schema.StringAttribute{ + Required: true, + Description: "Permission level for the principal on the object (e.g., CAN_MANAGE, CAN_USE, CAN_VIEW). See the Databricks Permissions API documentation for valid permission levels for each object type.", + Validators: []validator.String{ + ValidatePermissionLevel(), + }, + }, + }, + } +} + +func (r *PermissionResource) Configure(ctx context.Context, req resource.ConfigureRequest, resp *resource.ConfigureResponse) { + if req.ProviderData == nil { + return + } + client, ok := req.ProviderData.(*common.DatabricksClient) + if !ok { + resp.Diagnostics.AddError( + "Unexpected Resource Configure Type", + fmt.Sprintf("Expected *common.DatabricksClient, got: %T. 
Please report this issue to the provider developers.", req.ProviderData), + ) + return + } + r.Client = client +} + +func (r *PermissionResource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { + var plan PermissionResourceModel + resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...) + if resp.Diagnostics.HasError() { + return + } + + r.upsertPermission(ctx, &plan, &resp.Diagnostics) + if resp.Diagnostics.HasError() { + return + } + + // Set computed fields + principal := r.getPrincipalFromModel(&plan) + plan.ID = types.StringValue(fmt.Sprintf("%s/%s/%s", plan.ObjectType.ValueString(), plan.ObjectID.ValueString(), principal)) + + resp.Diagnostics.Append(resp.State.Set(ctx, &plan)...) +} + +func (r *PermissionResource) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { + var state PermissionResourceModel + resp.Diagnostics.Append(req.State.Get(ctx, &state)...) + if resp.Diagnostics.HasError() { + return + } + + w, err := r.Client.WorkspaceClient() + if err != nil { + resp.Diagnostics.AddError("Failed to get workspace client", err.Error()) + return + } + + // Parse the ID to get object type, object ID and principal + objectType, objectID, principal, err := r.parseID(state.ID.ValueString()) + if err != nil { + resp.Diagnostics.AddError("Failed to parse resource ID", err.Error()) + return + } + + // Read current permissions + perms, err := w.Permissions.Get(ctx, iam.GetPermissionRequest{ + RequestObjectId: objectID, + RequestObjectType: objectType, + }) + if err != nil { + // If the object or permissions are not found, remove from state to trigger recreation + if apierr.IsMissing(err) { + resp.State.RemoveResource(ctx) + return + } + resp.Diagnostics.AddError("Failed to read permissions", err.Error()) + return + } + + // Filter for the specific principal + found := false + for _, acl := range perms.AccessControlList { + if r.matchesPrincipal(acl, principal) { + // Update the state with the current 
permission level + if len(acl.AllPermissions) > 0 { + state.PermissionLevel = types.StringValue(string(acl.AllPermissions[0].PermissionLevel)) + found = true + break + } + } + } + + if !found { + // Permission for this specific principal no longer exists, remove from state to trigger recreation + resp.State.RemoveResource(ctx) + return + } + + resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) +} + +func (r *PermissionResource) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) { + var plan PermissionResourceModel + resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...) + if resp.Diagnostics.HasError() { + return + } + + r.upsertPermission(ctx, &plan, &resp.Diagnostics) + if resp.Diagnostics.HasError() { + return + } + + resp.Diagnostics.Append(resp.State.Set(ctx, &plan)...) +} + +func (r *PermissionResource) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { + var state PermissionResourceModel + resp.Diagnostics.Append(req.State.Get(ctx, &state)...) 
+ if resp.Diagnostics.HasError() { + return + } + + w, err := r.Client.WorkspaceClient() + if err != nil { + resp.Diagnostics.AddError("Failed to get workspace client", err.Error()) + return + } + + objectType := state.ObjectType.ValueString() + objectID := state.ObjectID.ValueString() + principal := r.getPrincipalFromModel(&state) + + // Lock the object to prevent concurrent modifications + // This is CRITICAL for Delete to avoid race conditions when multiple + // permission resources for the same object are deleted concurrently + lockObject(objectType, objectID) + defer unlockObject(objectType, objectID) + + // Read current permissions to see what to remove + currentPerms, err := w.Permissions.Get(ctx, iam.GetPermissionRequest{ + RequestObjectId: objectID, + RequestObjectType: objectType, + }) + if err != nil { + // If the object or permissions are not found, the permission is already gone + // This is the desired state, so we can return successfully + if apierr.IsMissing(err) { + return + } + resp.Diagnostics.AddError("Failed to read current permissions", err.Error()) + return + } + + // Build a list of all permissions EXCEPT the one we're deleting + var remainingACLs []iam.AccessControlRequest + for _, acl := range currentPerms.AccessControlList { + if !r.matchesPrincipal(acl, principal) { + // Keep this ACL + if len(acl.AllPermissions) > 0 { + remainingACLs = append(remainingACLs, iam.AccessControlRequest{ + UserName: acl.UserName, + GroupName: acl.GroupName, + ServicePrincipalName: acl.ServicePrincipalName, + PermissionLevel: acl.AllPermissions[0].PermissionLevel, + }) + } + } + } + + // Use Set to replace all permissions (effectively removing the specified principal) + _, err = w.Permissions.Set(ctx, iam.SetObjectPermissions{ + RequestObjectId: objectID, + RequestObjectType: objectType, + AccessControlList: remainingACLs, + }) + if err != nil { + // If the object or principal doesn't exist, the permission is already gone + // This can happen if the 
underlying object or principal was deleted outside of Terraform + if apierr.IsMissing(err) { + return + } + resp.Diagnostics.AddError("Failed to delete permission", err.Error()) + return + } +} + +func (r *PermissionResource) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) { + // Import ID format: // + // Example: clusters/cluster-123/user@example.com + + w, err := r.Client.WorkspaceClient() + if err != nil { + resp.Diagnostics.AddError("Failed to get workspace client", err.Error()) + return + } + + // Parse the import ID + objectType, objectID, principal, err := r.parseID(req.ID) + if err != nil { + resp.Diagnostics.AddError( + "Invalid Import ID Format", + fmt.Sprintf("Expected format: //. Error: %s", err.Error()), + ) + return + } + + // Read current permissions from Databricks + perms, err := w.Permissions.Get(ctx, iam.GetPermissionRequest{ + RequestObjectId: objectID, + RequestObjectType: objectType, + }) + if err != nil { + resp.Diagnostics.AddError("Failed to read permissions", err.Error()) + return + } + + // Find the specific principal's permission + var found bool + var state PermissionResourceModel + state.ID = types.StringValue(req.ID) + state.ObjectType = types.StringValue(objectType) + state.ObjectID = types.StringValue(objectID) + + for _, acl := range perms.AccessControlList { + if r.matchesPrincipal(acl, principal) { + if len(acl.AllPermissions) > 0 { + state.PermissionLevel = types.StringValue(string(acl.AllPermissions[0].PermissionLevel)) + + // Set principal fields - use null for empty strings + if acl.UserName != "" { + state.UserName = types.StringValue(acl.UserName) + } else { + state.UserName = types.StringNull() + } + + if acl.GroupName != "" { + state.GroupName = types.StringValue(acl.GroupName) + } else { + state.GroupName = types.StringNull() + } + + if acl.ServicePrincipalName != "" { + state.ServicePrincipalName = types.StringValue(acl.ServicePrincipalName) + } else { + 
state.ServicePrincipalName = types.StringNull() + } + + found = true + break + } + } + } + + if !found { + resp.Diagnostics.AddError( + "Permission Not Found", + fmt.Sprintf("No permission found for principal %q on object %s/%s", principal, objectType, objectID), + ) + return + } + + resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) +} + +// Helper methods + +// upsertPermission creates or updates a permission for a principal on an object +func (r *PermissionResource) upsertPermission(ctx context.Context, model *PermissionResourceModel, diags *diag.Diagnostics) { + w, err := r.Client.WorkspaceClient() + if err != nil { + diags.AddError("Failed to get workspace client", err.Error()) + return + } + + objectType := model.ObjectType.ValueString() + objectID := model.ObjectID.ValueString() + + // Lock the object to prevent concurrent modifications + lockObject(objectType, objectID) + defer unlockObject(objectType, objectID) + + // Create the permission update request + permLevel := iam.PermissionLevel(model.PermissionLevel.ValueString()) + + _, err = w.Permissions.Update(ctx, iam.UpdateObjectPermissions{ + RequestObjectId: objectID, + RequestObjectType: objectType, + AccessControlList: []iam.AccessControlRequest{ + { + UserName: model.UserName.ValueString(), + GroupName: model.GroupName.ValueString(), + ServicePrincipalName: model.ServicePrincipalName.ValueString(), + PermissionLevel: permLevel, + }, + }, + }) + if err != nil { + diags.AddError("Failed to create/update permission", err.Error()) + return + } +} + +// getPrincipalFromModel extracts the principal identifier from the model +func (r *PermissionResource) getPrincipalFromModel(model *PermissionResourceModel) string { + if !model.UserName.IsNull() && model.UserName.ValueString() != "" { + return model.UserName.ValueString() + } + if !model.GroupName.IsNull() && model.GroupName.ValueString() != "" { + return model.GroupName.ValueString() + } + if !model.ServicePrincipalName.IsNull() && 
model.ServicePrincipalName.ValueString() != "" { + return model.ServicePrincipalName.ValueString() + } + return "" +} + +func (r *PermissionResource) matchesPrincipal(acl iam.AccessControlResponse, principal string) bool { + return acl.UserName == principal || + acl.GroupName == principal || + acl.ServicePrincipalName == principal +} + +func (r *PermissionResource) parseID(id string) (objectType string, objectID string, principal string, err error) { + // ID format: // + // Example: clusters/cluster-123/user@example.com + parts := strings.Split(id, "/") + if len(parts) < 3 { + return "", "", "", fmt.Errorf("invalid ID format: expected //, got %s", id) + } + + // Handle cases where object_id might contain slashes (e.g., notebooks paths) + // The principal is always the last part + principal = parts[len(parts)-1] + objectType = parts[0] + objectID = strings.Join(parts[1:len(parts)-1], "/") + + return objectType, objectID, principal, nil +} diff --git a/internal/providers/pluginfw/products/permissions/resource_permission_acc_test.go b/internal/providers/pluginfw/products/permissions/resource_permission_acc_test.go new file mode 100644 index 0000000000..08213db7b2 --- /dev/null +++ b/internal/providers/pluginfw/products/permissions/resource_permission_acc_test.go @@ -0,0 +1,558 @@ +package permissions_test + +import ( + "context" + "fmt" + "testing" + + "github.com/databricks/databricks-sdk-go" + "github.com/databricks/databricks-sdk-go/service/iam" + "github.com/databricks/terraform-provider-databricks/internal/acceptance" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/terraform" + "github.com/stretchr/testify/assert" +) + +func TestAccPermission_Cluster(t *testing.T) { + acceptance.LoadDebugEnvIfRunsFromIDE(t, "workspace") + clusterTemplate := ` +data "databricks_spark_version" "latest" { +} + +resource "databricks_cluster" "this" { + cluster_name = "singlenode-{var.RANDOM}" + spark_version = 
data.databricks_spark_version.latest.id
  instance_pool_id = "{env.TEST_INSTANCE_POOL_ID}"
  num_workers = 0
  autotermination_minutes = 10
  spark_conf = {
    "spark.databricks.cluster.profile" = "singleNode"
    "spark.master" = "local[*]"
  }
  custom_tags = {
    "ResourceClass" = "SingleNode"
  }
}

resource "databricks_group" "group1" {
  display_name = "permission-group1-{var.RANDOM}"
}

resource "databricks_group" "group2" {
  display_name = "permission-group2-{var.RANDOM}"
}

resource "databricks_permission" "cluster_group1" {
  object_type = "clusters"
  object_id = databricks_cluster.this.id
  group_name = databricks_group.group1.display_name
  permission_level = "CAN_ATTACH_TO"
}

resource "databricks_permission" "cluster_group2" {
  object_type = "clusters"
  object_id = databricks_cluster.this.id
  group_name = databricks_group.group2.display_name
  permission_level = "CAN_RESTART"
}
`

	acceptance.WorkspaceLevel(t, acceptance.Step{
		Template: clusterTemplate,
		Check: resource.ComposeTestCheckFunc(
			resource.TestCheckResourceAttr("databricks_permission.cluster_group1", "permission_level", "CAN_ATTACH_TO"),
			resource.TestCheckResourceAttr("databricks_permission.cluster_group2", "permission_level", "CAN_RESTART"),
			// Cross-check directly against the Permissions API, not just state.
			func(s *terraform.State) error {
				w := databricks.Must(databricks.NewWorkspaceClient())
				clusterId := s.RootModule().Resources["databricks_cluster.this"].Primary.ID
				permissions, err := w.Permissions.GetByRequestObjectTypeAndRequestObjectId(context.Background(), "clusters", clusterId)
				assert.NoError(t, err)

				group1Name := s.RootModule().Resources["databricks_group.group1"].Primary.Attributes["display_name"]
				group2Name := s.RootModule().Resources["databricks_group.group2"].Primary.Attributes["display_name"]

				// Verify both permissions exist
				foundGroup1 := false
				foundGroup2 := false
				for _, acl := range permissions.AccessControlList {
					if acl.GroupName == group1Name {
						assert.Equal(t, iam.PermissionLevelCanAttachTo, acl.AllPermissions[0].PermissionLevel)
						foundGroup1 = true
					}
					if acl.GroupName == group2Name {
						assert.Equal(t, iam.PermissionLevelCanRestart, acl.AllPermissions[0].PermissionLevel)
						foundGroup2 = true
					}
				}
				assert.True(t, foundGroup1, "Group1 permission not found")
				assert.True(t, foundGroup2, "Group2 permission not found")
				return nil
			},
		),
	})
}

// TestAccPermission_Job grants a group and a user different levels on one job.
func TestAccPermission_Job(t *testing.T) {
	acceptance.LoadDebugEnvIfRunsFromIDE(t, "workspace")
	template := `
resource "databricks_job" "this" {
  name = "permission-job-{var.RANDOM}"
}

resource "databricks_group" "viewers" {
  display_name = "permission-viewers-{var.RANDOM}"
}

resource "databricks_user" "test_user" {
  user_name = "permission-test-{var.RANDOM}@example.com"
  force = true
}

resource "databricks_permission" "job_group" {
  object_type = "jobs"
  object_id = databricks_job.this.id
  group_name = databricks_group.viewers.display_name
  permission_level = "CAN_VIEW"
}

resource "databricks_permission" "job_user" {
  object_type = "jobs"
  object_id = databricks_job.this.id
  user_name = databricks_user.test_user.user_name
  permission_level = "CAN_MANAGE_RUN"
}
`

	acceptance.WorkspaceLevel(t, acceptance.Step{
		Template: template,
		Check: resource.ComposeTestCheckFunc(
			resource.TestCheckResourceAttr("databricks_permission.job_group", "object_type", "jobs"),
			resource.TestCheckResourceAttrSet("databricks_permission.job_group", "object_id"),
			resource.TestCheckResourceAttr("databricks_permission.job_group", "permission_level", "CAN_VIEW"),
			resource.TestCheckResourceAttr("databricks_permission.job_user", "object_type", "jobs"),
			resource.TestCheckResourceAttrSet("databricks_permission.job_user", "object_id"),
			resource.TestCheckResourceAttr("databricks_permission.job_user", "permission_level", "CAN_MANAGE_RUN"),
		),
	})
}

// TestAccPermission_Notebook uses a workspace path (not a numeric ID) as the
// object_id for "notebooks" permissions.
func TestAccPermission_Notebook(t *testing.T) {
	acceptance.LoadDebugEnvIfRunsFromIDE(t, "workspace")
	template := `
resource "databricks_directory" "this" {
  path = "/permission-test-{var.RANDOM}"
}

resource "databricks_notebook" "this" {
  source = "{var.CWD}/../storage/testdata/tf-test-python.py"
  path = "${databricks_directory.this.path}/test_notebook"
}

resource "databricks_group" "editors" {
  display_name = "permission-editors-{var.RANDOM}"
}

resource "databricks_group" "runners" {
  display_name = "permission-runners-{var.RANDOM}"
}

resource "databricks_permission" "notebook_editors" {
  object_type = "notebooks"
  object_id = databricks_notebook.this.path
  group_name = databricks_group.editors.display_name
  permission_level = "CAN_EDIT"
}

resource "databricks_permission" "notebook_runners" {
  object_type = "notebooks"
  object_id = databricks_notebook.this.path
  group_name = databricks_group.runners.display_name
  permission_level = "CAN_RUN"
}
`

	acceptance.WorkspaceLevel(t, acceptance.Step{
		Template: template,
		Check: resource.ComposeTestCheckFunc(
			resource.TestCheckResourceAttr("databricks_permission.notebook_editors", "object_type", "notebooks"),
			resource.TestCheckResourceAttrSet("databricks_permission.notebook_editors", "object_id"),
			resource.TestCheckResourceAttr("databricks_permission.notebook_editors", "permission_level", "CAN_EDIT"),
			resource.TestCheckResourceAttr("databricks_permission.notebook_runners", "object_type", "notebooks"),
			resource.TestCheckResourceAttrSet("databricks_permission.notebook_runners", "object_id"),
			resource.TestCheckResourceAttr("databricks_permission.notebook_runners", "permission_level", "CAN_RUN"),
		),
	})
}

// TestAccPermission_Authorization_Tokens manages three teams' token
// permissions through independent resources on one shared object.
func TestAccPermission_Authorization_Tokens(t *testing.T) {
	acceptance.LoadDebugEnvIfRunsFromIDE(t, "workspace")
	template := `
resource "databricks_group" "team_a" {
  display_name = "permission-team-a-{var.STICKY_RANDOM}"
}

resource "databricks_group" "team_b" {
  display_name = "permission-team-b-{var.STICKY_RANDOM}"
}

resource "databricks_group" "team_c" {
  display_name = "permission-team-c-{var.STICKY_RANDOM}"
}

# This demonstrates the key benefit: each team's token permissions
# can be managed independently, unlike databricks_permissions
resource "databricks_permission" "tokens_team_a" {
  object_type = "authorization"
  object_id = "tokens"
  group_name = databricks_group.team_a.display_name
  permission_level = "CAN_USE"
}

resource "databricks_permission" "tokens_team_b" {
  object_type = "authorization"
  object_id = "tokens"
  group_name = databricks_group.team_b.display_name
  permission_level = "CAN_USE"
}

resource "databricks_permission" "tokens_team_c" {
  object_type = "authorization"
  object_id = "tokens"
  group_name = databricks_group.team_c.display_name
  permission_level = "CAN_USE"
}
`

	acceptance.WorkspaceLevel(t, acceptance.Step{
		Template: template,
		Check: resource.ComposeTestCheckFunc(
			resource.TestCheckResourceAttr("databricks_permission.tokens_team_a", "object_type", "authorization"),
			resource.TestCheckResourceAttr("databricks_permission.tokens_team_a", "object_id", "tokens"),
			resource.TestCheckResourceAttr("databricks_permission.tokens_team_a", "permission_level", "CAN_USE"),
			resource.TestCheckResourceAttr("databricks_permission.tokens_team_b", "object_type", "authorization"),
			resource.TestCheckResourceAttr("databricks_permission.tokens_team_b", "object_id", "tokens"),
			resource.TestCheckResourceAttr("databricks_permission.tokens_team_c", "object_type", "authorization"),
			resource.TestCheckResourceAttr("databricks_permission.tokens_team_c", "object_id", "tokens"),
			func(s *terraform.State) error {
				w := databricks.Must(databricks.NewWorkspaceClient())
				permissions, err := w.Permissions.GetByRequestObjectTypeAndRequestObjectId(context.Background(), "authorization", "tokens")
				assert.NoError(t, err)

				teamA := s.RootModule().Resources["databricks_group.team_a"].Primary.Attributes["display_name"]
				teamB := s.RootModule().Resources["databricks_group.team_b"].Primary.Attributes["display_name"]
				teamC := s.RootModule().Resources["databricks_group.team_c"].Primary.Attributes["display_name"]

				foundA, foundB, foundC := false, false, false
				for _, acl := range permissions.AccessControlList {
					if acl.GroupName == teamA {
						foundA = true
					}
					if acl.GroupName == teamB {
						foundB = true
					}
					if acl.GroupName == teamC {
						foundC = true
					}
				}
				assert.True(t, foundA, "Team A permission not found")
				assert.True(t, foundB, "Team B permission not found")
				assert.True(t, foundC, "Team C permission not found")
				return nil
			},
		),
	}, acceptance.Step{
		// Update one permission independently - remove team_b
		Template: `
resource "databricks_group" "team_a" {
  display_name = "permission-team-a-{var.STICKY_RANDOM}"
}

resource "databricks_group" "team_b" {
  display_name = "permission-team-b-{var.STICKY_RANDOM}"
}

resource "databricks_group" "team_c" {
  display_name = "permission-team-c-{var.STICKY_RANDOM}"
}

resource "databricks_permission" "tokens_team_a" {
  object_type = "authorization"
  object_id = "tokens"
  group_name = databricks_group.team_a.display_name
  permission_level = "CAN_USE"
}

resource "databricks_permission" "tokens_team_c" {
  object_type = "authorization"
  object_id = "tokens"
  group_name = databricks_group.team_c.display_name
  permission_level = "CAN_USE"
}
`,
		Check: func(s *terraform.State) error {
			// After removing only tokens_team_b, A and C must survive.
			w := databricks.Must(databricks.NewWorkspaceClient())
			permissions, err := w.Permissions.GetByRequestObjectTypeAndRequestObjectId(context.Background(), "authorization", "tokens")
			assert.NoError(t, err)

			teamA := s.RootModule().Resources["databricks_group.team_a"].Primary.Attributes["display_name"]
			teamB := s.RootModule().Resources["databricks_group.team_b"].Primary.Attributes["display_name"]
			teamC := s.RootModule().Resources["databricks_group.team_c"].Primary.Attributes["display_name"]

			foundA, foundB, foundC := false, false, false
			for _, acl := range permissions.AccessControlList {
				if acl.GroupName == teamA {
					foundA = true
				}
				if acl.GroupName == teamB {
					foundB = true
				}
				if acl.GroupName == teamC {
					foundC = true
				}
			}
			assert.True(t, foundA, "Team A permission should still exist")
			assert.False(t, foundB, "Team B permission should be removed")
			assert.True(t, foundC, "Team C permission should still exist")
			return nil
		},
	})
}

// TestAccPermission_Update changes only permission_level (no replacement).
func TestAccPermission_Update(t *testing.T) {
	acceptance.LoadDebugEnvIfRunsFromIDE(t, "workspace")
	template1 := `
resource "databricks_job" "this" {
  name = "permission-update-{var.RANDOM}"
}

resource "databricks_group" "test" {
  display_name = "permission-update-{var.RANDOM}"
}

resource "databricks_permission" "job_group" {
  object_type = "jobs"
  object_id = databricks_job.this.id
  group_name = databricks_group.test.display_name
  permission_level = "CAN_VIEW"
}
`

	template2 := `
resource "databricks_job" "this" {
  name = "permission-update-{var.RANDOM}"
}

resource "databricks_group" "test" {
  display_name = "permission-update-{var.RANDOM}"
}

resource "databricks_permission" "job_group" {
  object_type = "jobs"
  object_id = databricks_job.this.id
  group_name = databricks_group.test.display_name
  permission_level = "CAN_MANAGE"
}
`

	acceptance.WorkspaceLevel(t, acceptance.Step{
		Template: template1,
		Check: resource.ComposeTestCheckFunc(
			resource.TestCheckResourceAttr("databricks_permission.job_group", "object_type", "jobs"),
			resource.TestCheckResourceAttrSet("databricks_permission.job_group", "object_id"),
			resource.TestCheckResourceAttr("databricks_permission.job_group", "permission_level", "CAN_VIEW"),
		),
	}, acceptance.Step{
		Template: template2,
		Check: resource.ComposeTestCheckFunc(
			resource.TestCheckResourceAttr("databricks_permission.job_group", "object_type", "jobs"),
			resource.TestCheckResourceAttrSet("databricks_permission.job_group", "object_id"),
			resource.TestCheckResourceAttr("databricks_permission.job_group", "permission_level", "CAN_MANAGE"),
			func(s *terraform.State) error {
				// Confirm the new level landed in the backend, not just state.
				w := databricks.Must(databricks.NewWorkspaceClient())
				jobId := s.RootModule().Resources["databricks_job.this"].Primary.ID
				permissions, err := w.Permissions.GetByRequestObjectTypeAndRequestObjectId(context.Background(), "jobs", jobId)
				assert.NoError(t, err)

				groupName := s.RootModule().Resources["databricks_group.test"].Primary.Attributes["display_name"]
				for _, acl := range permissions.AccessControlList {
					if acl.GroupName == groupName {
						assert.Equal(t, iam.PermissionLevelCanManage, acl.AllPermissions[0].PermissionLevel)
						return nil
					}
				}
				return fmt.Errorf("permission not found for group %s", groupName)
			},
		),
	})
}

// TestAccPermission_ServicePrincipal grants CAN_MANAGE to a service principal.
func TestAccPermission_ServicePrincipal(t *testing.T) {
	acceptance.LoadDebugEnvIfRunsFromIDE(t, "workspace")
	if acceptance.IsGcp(t) {
		// NOTE(review): Skipf(t)(...) assumes the acceptance package exposes a
		// curried skip helper - confirm this matches internal/acceptance's API.
		acceptance.Skipf(t)("Service principals have different behavior on GCP")
	}

	template := `
resource "databricks_job" "this" {
  name = "permission-sp-{var.RANDOM}"
}

resource "databricks_service_principal" "sp" {
  display_name = "permission-sp-{var.RANDOM}"
}

resource "databricks_permission" "job_sp" {
  object_type = "jobs"
  object_id = databricks_job.this.id
  service_principal_name = databricks_service_principal.sp.application_id
  permission_level = "CAN_MANAGE"
}
`

	acceptance.WorkspaceLevel(t, acceptance.Step{
		Template: template,
		Check: resource.ComposeTestCheckFunc(
			resource.TestCheckResourceAttr("databricks_permission.job_sp", "object_type", "jobs"),
			resource.TestCheckResourceAttrSet("databricks_permission.job_sp", "object_id"),
			resource.TestCheckResourceAttr("databricks_permission.job_sp", "permission_level", "CAN_MANAGE"),
			resource.TestCheckResourceAttrSet("databricks_permission.job_sp", "service_principal_name"),
		),
	})
}

// TestAccPermission_Import round-trips the resource through terraform import
// (ID format <object_type>/<object_id>/<principal>).
func TestAccPermission_Import(t *testing.T) {
	acceptance.LoadDebugEnvIfRunsFromIDE(t, "workspace")
	template := `
resource "databricks_job" "this" {
  name = "permission-import-{var.RANDOM}"
}

resource "databricks_group" "test" {
  display_name = "permission-import-{var.RANDOM}"
}

resource "databricks_permission" "job_group" {
  object_type = "jobs"
  object_id = databricks_job.this.id
  group_name = databricks_group.test.display_name
  permission_level = "CAN_VIEW"
}
`

	acceptance.WorkspaceLevel(t, acceptance.Step{
		Template: template,
	}, acceptance.Step{
		Template: template,
		ResourceName: "databricks_permission.job_group",
		ImportState: true,
		ImportStateVerify: true,
	})
}

// TestAccPermission_SqlEndpoint exercises an object type that itself contains
// a slash ("sql/warehouses").
func TestAccPermission_SqlEndpoint(t *testing.T) {
	acceptance.LoadDebugEnvIfRunsFromIDE(t, "workspace")
	template := `
resource "databricks_sql_endpoint" "this" {
  name = "permission-sql-{var.RANDOM}"
  cluster_size = "2X-Small"
  tags {
    custom_tags {
      key = "Owner"
      value = "eng-dev-ecosystem-team_at_databricks.com"
    }
  }
}

resource "databricks_group" "sql_users" {
  display_name = "permission-sql-users-{var.RANDOM}"
}

resource "databricks_permission" "warehouse_users" {
  object_type = "sql/warehouses"
  object_id = databricks_sql_endpoint.this.id
  group_name = databricks_group.sql_users.display_name
  permission_level = "CAN_USE"
}
`

	acceptance.WorkspaceLevel(t, acceptance.Step{
		Template: template,
		Check: resource.ComposeTestCheckFunc(
			resource.TestCheckResourceAttr("databricks_permission.warehouse_users", "object_type", "sql/warehouses"),
			resource.TestCheckResourceAttrSet("databricks_permission.warehouse_users", "object_id"),
			resource.TestCheckResourceAttr("databricks_permission.warehouse_users", "permission_level", "CAN_USE"),
		),
	})
}

func TestAccPermission_InstancePool(t *testing.T) {
	acceptance.LoadDebugEnvIfRunsFromIDE(t, "workspace")
	template := `
data "databricks_node_type" "smallest" {
  local_disk = true
}

resource "databricks_instance_pool" "this" {
  instance_pool_name = "permission-pool-{var.RANDOM}"
  min_idle_instances = 0
  max_capacity = 1
  node_type_id = data.databricks_node_type.smallest.id
  idle_instance_autotermination_minutes = 10
}

resource "databricks_group" "pool_users" {
  display_name = "permission-pool-users-{var.RANDOM}"
}

resource "databricks_permission" "pool_access" {
  object_type = "instance-pools"
  object_id = databricks_instance_pool.this.id
  group_name = databricks_group.pool_users.display_name
  permission_level = "CAN_ATTACH_TO"
}
`

	acceptance.WorkspaceLevel(t, acceptance.Step{
		Template: template,
		Check: resource.ComposeTestCheckFunc(
			resource.TestCheckResourceAttr("databricks_permission.pool_access", "object_type", "instance-pools"),
			resource.TestCheckResourceAttrSet("databricks_permission.pool_access", "object_id"),
			resource.TestCheckResourceAttr("databricks_permission.pool_access", "permission_level", "CAN_ATTACH_TO"),
		),
	})
}

func TestAccPermission_ClusterPolicy(t *testing.T) {
	acceptance.LoadDebugEnvIfRunsFromIDE(t, "workspace")
	template := `
resource "databricks_cluster_policy" "this" {
  name = "permission-policy-{var.RANDOM}"
  definition = jsonencode({
    "spark_conf.spark.hadoop.javax.jdo.option.ConnectionURL": {
      "type": "fixed",
      "value": "jdbc:sqlserver://"
    }
  })
}

resource "databricks_group" "policy_users" {
  display_name = "permission-policy-users-{var.RANDOM}"
}

resource "databricks_permission" "policy_access" {
  object_type = "cluster-policies"
  object_id = databricks_cluster_policy.this.id
  group_name = databricks_group.policy_users.display_name
  permission_level = "CAN_USE"
}
`

	acceptance.WorkspaceLevel(t, acceptance.Step{
		Template: template,
		Check: resource.ComposeTestCheckFunc(
			resource.TestCheckResourceAttr("databricks_permission.policy_access", "object_type", "cluster-policies"),
			resource.TestCheckResourceAttrSet("databricks_permission.policy_access", "object_id"),
			resource.TestCheckResourceAttr("databricks_permission.policy_access", "permission_level", "CAN_USE"),
		),
	})
}
diff --git a/internal/providers/pluginfw/products/permissions/resource_permission_test.go b/internal/providers/pluginfw/products/permissions/resource_permission_test.go
new file mode 100644
index 0000000000..3a34be6115
--- /dev/null
+++ b/internal/providers/pluginfw/products/permissions/resource_permission_test.go
@@ -0,0 +1,225 @@
package permissions

import (
	"context"
	"testing"

	"github.com/databricks/terraform-provider-databricks/common"
	"github.com/hashicorp/terraform-plugin-framework/resource"
	"github.com/hashicorp/terraform-plugin-framework/resource/schema"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

// TestPermissionResource_Schema asserts presence and required/computed flags
// of every attribute without invoking any API.
func TestPermissionResource_Schema(t *testing.T) {
	r := ResourcePermission()
	require.NotNil(t, r)

	var resp resource.SchemaResponse
	r.Schema(context.Background(), resource.SchemaRequest{}, &resp)

	// Verify schema has required fields
	require.NotNil(t, resp.Schema.Attributes)

	// Check principal fields
	_, ok := resp.Schema.Attributes["user_name"]
	assert.True(t, ok, "user_name should be in schema")

	_, ok = resp.Schema.Attributes["group_name"]
	assert.True(t, ok, "group_name should be in schema")

	_, ok = resp.Schema.Attributes["service_principal_name"]
	assert.True(t, ok, "service_principal_name should be in schema")

	// Check permission level
	_, ok = resp.Schema.Attributes["permission_level"]
	assert.True(t, ok, "permission_level should be in schema")

	// Check object identifier fields
	_, ok = resp.Schema.Attributes["object_type"]
	assert.True(t, ok, "object_type should be in schema")

	_, ok = resp.Schema.Attributes["object_id"]
	assert.True(t, ok, "object_id should be in schema")

	// Check computed fields
	idAttr, ok := resp.Schema.Attributes["id"]
	assert.True(t, ok, "id should be in schema")
	if stringAttr, ok := 
idAttr.(schema.StringAttribute); ok { + assert.True(t, stringAttr.Computed, "id should be computed") + } + + // Verify object_type is required + objectTypeAttr, ok := resp.Schema.Attributes["object_type"] + assert.True(t, ok, "object_type should be in schema") + if stringAttr, ok := objectTypeAttr.(schema.StringAttribute); ok { + assert.True(t, stringAttr.Required, "object_type should be required") + } + + // Verify object_id is required + objectIDAttr, ok := resp.Schema.Attributes["object_id"] + assert.True(t, ok, "object_id should be in schema") + if stringAttr, ok := objectIDAttr.(schema.StringAttribute); ok { + assert.True(t, stringAttr.Required, "object_id should be required") + } +} + +func TestPermissionResource_Metadata(t *testing.T) { + r := ResourcePermission() + var resp resource.MetadataResponse + r.Metadata(context.Background(), resource.MetadataRequest{ + ProviderTypeName: "databricks", + }, &resp) + + assert.Equal(t, "databricks_permission", resp.TypeName) +} + +func TestPermissionResource_ParseID(t *testing.T) { + r := &PermissionResource{} + + tests := []struct { + name string + id string + wantObjectType string + wantObjectID string + wantPrincipal string + wantError bool + }{ + { + name: "cluster permission", + id: "clusters/test-cluster-id/user@example.com", + wantObjectType: "clusters", + wantObjectID: "test-cluster-id", + wantPrincipal: "user@example.com", + wantError: false, + }, + { + name: "job permission", + id: "jobs/123/test-group", + wantObjectType: "jobs", + wantObjectID: "123", + wantPrincipal: "test-group", + wantError: false, + }, + { + name: "authorization tokens", + id: "authorization/tokens/developers", + wantObjectType: "authorization", + wantObjectID: "tokens", + wantPrincipal: "developers", + wantError: false, + }, + { + name: "notebook with path containing slashes", + id: "notebooks/Shared/Team/notebook.py/user@example.com", + wantObjectType: "notebooks", + wantObjectID: "Shared/Team/notebook.py", + wantPrincipal: 
"user@example.com", + wantError: false, + }, + { + name: "invalid format - too few parts", + id: "clusters/test-cluster-id", + wantError: true, + }, + { + name: "invalid format - no slashes", + id: "test-id", + wantError: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + gotObjectType, gotObjectID, gotPrincipal, err := r.parseID(tt.id) + if tt.wantError { + assert.Error(t, err) + } else { + assert.NoError(t, err) + assert.Equal(t, tt.wantObjectType, gotObjectType) + assert.Equal(t, tt.wantObjectID, gotObjectID) + assert.Equal(t, tt.wantPrincipal, gotPrincipal) + } + }) + } +} + +func TestPermissionResource_Configure(t *testing.T) { + r := &PermissionResource{} + client := &common.DatabricksClient{} + + var resp resource.ConfigureResponse + r.Configure(context.Background(), resource.ConfigureRequest{ + ProviderData: client, + }, &resp) + + assert.False(t, resp.Diagnostics.HasError()) + assert.Equal(t, client, r.Client) +} + +func TestPermissionResource_ImportState(t *testing.T) { + // Verify the resource implements the ImportState interface + var _ resource.ResourceWithImportState = &PermissionResource{} + + // Test that parseID correctly handles import ID format + r := &PermissionResource{} + + tests := []struct { + name string + importID string + expectedObjType string + expectedObjID string + expectedPrinc string + expectError bool + }{ + { + name: "valid cluster import", + importID: "clusters/cluster-123/user@example.com", + expectedObjType: "clusters", + expectedObjID: "cluster-123", + expectedPrinc: "user@example.com", + expectError: false, + }, + { + name: "valid job import", + importID: "jobs/job-456/data-engineers", + expectedObjType: "jobs", + expectedObjID: "job-456", + expectedPrinc: "data-engineers", + expectError: false, + }, + { + name: "valid authorization import", + importID: "authorization/tokens/team-a", + expectedObjType: "authorization", + expectedObjID: "tokens", + expectedPrinc: "team-a", + expectError: 
false, + }, + { + name: "invalid format - too few parts", + importID: "clusters/cluster-123", + expectError: true, + }, + { + name: "invalid format - no slashes", + importID: "cluster-123-user", + expectError: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + objectType, objectID, principal, err := r.parseID(tt.importID) + + if tt.expectError { + assert.Error(t, err, "Expected error for invalid import ID") + } else { + assert.NoError(t, err, "Expected no error for valid import ID") + assert.Equal(t, tt.expectedObjType, objectType, "Object type should match") + assert.Equal(t, tt.expectedObjID, objectID, "Object ID should match") + assert.Equal(t, tt.expectedPrinc, principal, "Principal should match") + } + }) + } +} diff --git a/permissions/permission_definitions.go b/permissions/permission_definitions.go index a15ef91675..7fdeea6c7a 100644 --- a/permissions/permission_definitions.go +++ b/permissions/permission_definitions.go @@ -17,8 +17,8 @@ import ( "github.com/hashicorp/terraform-plugin-log/tflog" ) -// resourcePermissions captures all the information needed to manage permissions for a given object type. -type resourcePermissions struct { +// WorkspaceObjectPermissions captures all the information needed to manage permissions for a given object type. +type WorkspaceObjectPermissions struct { // Mandatory Fields // The attribute name that users configure with the ID of the object to manage @@ -77,8 +77,8 @@ type resourcePermissions struct { fetchObjectCreator func(ctx context.Context, w *databricks.WorkspaceClient, objectID string) (string, error) } -// getAllowedPermissionLevels returns the list of permission levels that are allowed for this resource type. -func (p resourcePermissions) getAllowedPermissionLevels(includeNonManagementPermissions bool) []string { +// GetAllowedPermissionLevels returns the list of permission levels that are allowed for this resource type. 
+func (p WorkspaceObjectPermissions) GetAllowedPermissionLevels(includeNonManagementPermissions bool) []string { levels := make([]string, 0, len(p.allowedPermissionLevels)) for level := range p.allowedPermissionLevels { if includeNonManagementPermissions || p.allowedPermissionLevels[level].isManagementPermission { @@ -99,7 +99,7 @@ type resourceStatus struct { // getObjectStatus returns the creator of the object and whether the object exists. If the object creator cannot be determined for this // resource type, an empty string is returned. Resources without fetchObjectCreator are assumed to exist and have an unknown creator. -func (p resourcePermissions) getObjectStatus(ctx context.Context, w *databricks.WorkspaceClient, objectID string) (resourceStatus, error) { +func (p WorkspaceObjectPermissions) getObjectStatus(ctx context.Context, w *databricks.WorkspaceClient, objectID string) (resourceStatus, error) { if p.fetchObjectCreator != nil { creator, err := p.fetchObjectCreator(ctx, w, objectID) if err != nil { @@ -111,7 +111,7 @@ func (p resourcePermissions) getObjectStatus(ctx context.Context, w *databricks. } // getPathVariant returns the name of the path attribute for this resource type. -func (p resourcePermissions) getPathVariant() string { +func (p WorkspaceObjectPermissions) getPathVariant() string { if p.pathVariant != "" { return p.pathVariant } @@ -119,7 +119,7 @@ func (p resourcePermissions) getPathVariant() string { } // validate checks that the user is not trying to set permissions for the admin group or remove their own management permissions. -func (p resourcePermissions) validate(ctx context.Context, entity entity.PermissionsEntity, currentUsername string) error { +func (p WorkspaceObjectPermissions) validate(ctx context.Context, entity entity.PermissionsEntity, currentUsername string) error { for _, change := range entity.AccessControlList { // Prevent users from setting permissions for admins. 
if change.GroupName == "admins" && !p.allowConfiguringAdmins { @@ -128,7 +128,7 @@ func (p resourcePermissions) validate(ctx context.Context, entity entity.Permiss // Check that the user is preventing themselves from managing the object level := p.allowedPermissionLevels[string(change.PermissionLevel)] if (change.UserName == currentUsername || change.ServicePrincipalName == currentUsername) && !level.isManagementPermission { - allowedLevelsForCurrentUser := p.getAllowedPermissionLevels(false) + allowedLevelsForCurrentUser := p.GetAllowedPermissionLevels(false) return fmt.Errorf("cannot remove management permissions for the current user for %s, allowed levels: %s", p.objectType, strings.Join(allowedLevelsForCurrentUser, ", ")) } @@ -140,7 +140,7 @@ func (p resourcePermissions) validate(ctx context.Context, entity entity.Permiss } // getID returns the object ID for the given user-specified ID. -func (p resourcePermissions) getID(ctx context.Context, w *databricks.WorkspaceClient, id string) (string, error) { +func (p WorkspaceObjectPermissions) getID(ctx context.Context, w *databricks.WorkspaceClient, id string) (string, error) { var err error if p.idRetriever != nil { id, err = p.idRetriever(ctx, w, id) @@ -152,7 +152,7 @@ func (p resourcePermissions) getID(ctx context.Context, w *databricks.WorkspaceC } // prepareForUpdate prepares the access control list for an update request by calling all update customizers. 
-func (p resourcePermissions) prepareForUpdate(objectID string, e entity.PermissionsEntity, currentUser string) (entity.PermissionsEntity, error) { +func (p WorkspaceObjectPermissions) prepareForUpdate(objectID string, e entity.PermissionsEntity, currentUser string) (entity.PermissionsEntity, error) { cachedCurrentUser := func() (string, error) { return currentUser, nil } ctx := update.ACLCustomizerContext{ GetCurrentUser: cachedCurrentUser, @@ -169,7 +169,7 @@ func (p resourcePermissions) prepareForUpdate(objectID string, e entity.Permissi } // prepareForDelete prepares the access control list for a delete request by calling all delete customizers. -func (p resourcePermissions) prepareForDelete(objectACL *iam.ObjectPermissions, getCurrentUser func() (string, error)) ([]iam.AccessControlRequest, error) { +func (p WorkspaceObjectPermissions) prepareForDelete(objectACL *iam.ObjectPermissions, getCurrentUser func() (string, error)) ([]iam.AccessControlRequest, error) { accl := make([]iam.AccessControlRequest, 0, len(objectACL.AccessControlList)) // By default, only admins have access to a resource when databricks_permissions for that resource are deleted. for _, acl := range objectACL.AccessControlList { @@ -211,7 +211,7 @@ func (p resourcePermissions) prepareForDelete(objectACL *iam.ObjectPermissions, // For example, the SQL API previously used CAN_VIEW for read-only permission, but the GA API uses CAN_READ. Users may // have CAN_VIEW in their resource configuration, so the read customizer will rewrite the response from CAN_READ to // CAN_VIEW to match the user's configuration. 
-func (p resourcePermissions) prepareResponse(objectID string, objectACL *iam.ObjectPermissions, existing entity.PermissionsEntity, me string) (entity.PermissionsEntity, error) { +func (p WorkspaceObjectPermissions) prepareResponse(objectID string, objectACL *iam.ObjectPermissions, existing entity.PermissionsEntity, me string) (entity.PermissionsEntity, error) { ctx := read.ACLCustomizerContext{ GetId: func() string { return objectID }, GetExistingPermissionsEntity: func() entity.PermissionsEntity { return existing }, @@ -254,7 +254,7 @@ func (p resourcePermissions) prepareResponse(objectID string, objectACL *iam.Obj } // addOwnerPermissionIfNeeded adds the owner permission to the object ACL if the owner permission is allowed and not already set. -func (p resourcePermissions) addOwnerPermissionIfNeeded(objectACL []iam.AccessControlRequest, ownerOpt string) []iam.AccessControlRequest { +func (p WorkspaceObjectPermissions) addOwnerPermissionIfNeeded(objectACL []iam.AccessControlRequest, ownerOpt string) []iam.AccessControlRequest { _, ok := p.allowedPermissionLevels["IS_OWNER"] if !ok { return objectACL @@ -286,7 +286,8 @@ type permissionLevelOptions struct { deprecated string } -func getResourcePermissionsFromId(id string) (resourcePermissions, error) { +// GetResourcePermissionsFromId returns the resource permissions mapping for a given ID. +func GetResourcePermissionsFromId(id string) (WorkspaceObjectPermissions, error) { idParts := strings.Split(id, "/") objectType := strings.Join(idParts[1:len(idParts)-1], "/") for _, mapping := range allResourcePermissions() { @@ -300,11 +301,11 @@ func getResourcePermissionsFromId(id string) (resourcePermissions, error) { return mapping, nil } } - return resourcePermissions{}, fmt.Errorf("resource type for %s not found", id) + return WorkspaceObjectPermissions{}, fmt.Errorf("resource type for %s not found", id) } -// getResourcePermissionsFromState returns the resourcePermissions for the given state. 
-func getResourcePermissionsFromState(d interface{ GetOk(string) (any, bool) }) (resourcePermissions, string, error) { +// getResourcePermissionsFromState returns the WorkspaceObjectPermissions for the given state. +func getResourcePermissionsFromState(d interface{ GetOk(string) (any, bool) }) (WorkspaceObjectPermissions, string, error) { allPermissions := allResourcePermissions() for _, mapping := range allPermissions { if v, ok := d.GetOk(mapping.field); ok { @@ -325,12 +326,61 @@ func getResourcePermissionsFromState(d interface{ GetOk(string) (any, bool) }) ( allFields = append(allFields, mapping.field) } sort.Strings(allFields) - return resourcePermissions{}, "", fmt.Errorf("at least one type of resource identifier must be set; allowed fields: %s", strings.Join(allFields, ", ")) + return WorkspaceObjectPermissions{}, "", fmt.Errorf("at least one type of resource identifier must be set; allowed fields: %s", strings.Join(allFields, ", ")) } -// getResourcePermissionsForObjectAcl returns the resourcePermissions for the given ObjectAclApiResponse. -// allResourcePermissions is the list of all resource types that can be managed by the databricks_permissions resource. 
-func allResourcePermissions() []resourcePermissions { +// GetRequestObjectType returns the request object type for the permission mapping +func (p WorkspaceObjectPermissions) GetRequestObjectType() string { + return p.requestObjectType +} + +// GetObjectType returns the object type for the permission mapping +func (p WorkspaceObjectPermissions) GetObjectType() string { + return p.objectType +} + +// GetField returns the field name for the permission mapping +func (p WorkspaceObjectPermissions) GetField() string { + return p.field +} + +// GetID returns the object ID for the given user-specified ID +func (p WorkspaceObjectPermissions) GetID(ctx context.Context, w *databricks.WorkspaceClient, id string) (string, error) { + return p.getID(ctx, w, id) +} + +// GetResourcePermissionsFromFieldValue finds the appropriate resource permissions mapping +// by checking which field is set in the provided map +func GetResourcePermissionsFromFieldValue(fieldValues map[string]string) (WorkspaceObjectPermissions, string, error) { + allPermissions := allResourcePermissions() + for _, mapping := range allPermissions { + if val, ok := fieldValues[mapping.field]; ok && val != "" { + if mapping.stateMatcher != nil && !mapping.stateMatcher(val) { + continue + } + return mapping, val, nil + } + } + allFields := make([]string, 0, len(allPermissions)) + seen := make(map[string]struct{}) + for _, mapping := range allPermissions { + if _, ok := seen[mapping.field]; ok { + continue + } + seen[mapping.field] = struct{}{} + allFields = append(allFields, mapping.field) + } + sort.Strings(allFields) + return WorkspaceObjectPermissions{}, "", fmt.Errorf("at least one type of resource identifier must be set; allowed fields: %s", strings.Join(allFields, ", ")) +} + +// AllResourcePermissions returns all permission type definitions. +// Exported for use by Plugin Framework resources to dynamically generate schemas. 
+func AllResourcePermissions() []WorkspaceObjectPermissions { + return allResourcePermissions() +} + +func allResourcePermissions() []WorkspaceObjectPermissions { PATH := func(ctx context.Context, w *databricks.WorkspaceClient, path string) (string, error) { info, err := w.Workspace.GetStatusByPath(ctx, path) if err != nil { @@ -344,7 +394,7 @@ func allResourcePermissions() []resourcePermissions { rewriteCanReadToCanView := read.RewritePermissions(map[iam.PermissionLevel]iam.PermissionLevel{ iam.PermissionLevelCanRead: iam.PermissionLevelCanView, }) - return []resourcePermissions{ + return []WorkspaceObjectPermissions{ { field: "cluster_policy_id", objectType: "cluster-policy", diff --git a/permissions/resource_permissions.go b/permissions/resource_permissions.go index b6acced75c..1f80d725c1 100644 --- a/permissions/resource_permissions.go +++ b/permissions/resource_permissions.go @@ -30,7 +30,7 @@ type PermissionsAPI struct { } // safePutWithOwner is a workaround for the limitation where warehouse without owners cannot have IS_OWNER set -func (a PermissionsAPI) safePutWithOwner(objectID string, objectACL []iam.AccessControlRequest, mapping resourcePermissions, ownerOpt string) error { +func (a PermissionsAPI) safePutWithOwner(objectID string, objectACL []iam.AccessControlRequest, mapping WorkspaceObjectPermissions, ownerOpt string) error { w, err := a.client.WorkspaceClient() if err != nil { return err @@ -69,7 +69,7 @@ func (a PermissionsAPI) getCurrentUser() (string, error) { } // Update updates object permissions. 
Technically, it's using method named SetOrDelete, but here we do more -func (a PermissionsAPI) Update(objectID string, entity entity.PermissionsEntity, mapping resourcePermissions) error { +func (a PermissionsAPI) Update(objectID string, entity entity.PermissionsEntity, mapping WorkspaceObjectPermissions) error { currentUser, err := a.getCurrentUser() if err != nil { return err @@ -91,7 +91,7 @@ func (a PermissionsAPI) Update(objectID string, entity entity.PermissionsEntity, // Delete gracefully removes permissions of non-admin users. After this operation, the object is managed // by the current user and admin group. If the resource has IS_OWNER permissions, they are reset to the // object creator, if it can be determined. -func (a PermissionsAPI) Delete(objectID string, mapping resourcePermissions) error { +func (a PermissionsAPI) Delete(objectID string, mapping WorkspaceObjectPermissions) error { if mapping.objectType == "pipelines" { // There is a bug which causes the code below send IS_OWNER with run_as identity // Which is of course wrong thing to do. 
@@ -122,7 +122,7 @@ func (a PermissionsAPI) Delete(objectID string, mapping resourcePermissions) err return a.safePutWithOwner(objectID, accl, mapping, resourceStatus.creator) } -func (a PermissionsAPI) readRaw(objectID string, mapping resourcePermissions) (*iam.ObjectPermissions, error) { +func (a PermissionsAPI) readRaw(objectID string, mapping WorkspaceObjectPermissions) (*iam.ObjectPermissions, error) { w, err := a.client.WorkspaceClient() if err != nil { return nil, err @@ -156,7 +156,7 @@ func (a PermissionsAPI) readRaw(objectID string, mapping resourcePermissions) (* } // Read gets all relevant permissions for the object, including inherited ones -func (a PermissionsAPI) Read(objectID string, mapping resourcePermissions, existing entity.PermissionsEntity, me string) (entity.PermissionsEntity, error) { +func (a PermissionsAPI) Read(objectID string, mapping WorkspaceObjectPermissions, existing entity.PermissionsEntity, me string) (entity.PermissionsEntity, error) { permissions, err := a.readRaw(objectID, mapping) if err != nil { return entity.PermissionsEntity{}, err @@ -164,7 +164,7 @@ func (a PermissionsAPI) Read(objectID string, mapping resourcePermissions, exist return mapping.prepareResponse(objectID, permissions, existing, me) } -// ResourcePermissions definition +// ResourcePermissions is the SDKv2 resource definition for databricks_permissions func ResourcePermissions() common.Resource { s := common.StructToSchema(entity.PermissionsEntity{}, func(s map[string]*schema.Schema) map[string]*schema.Schema { for _, mapping := range allResourcePermissions() { @@ -231,14 +231,14 @@ func ResourcePermissions() common.Resource { // is not aware of. 
if _, ok := mapping.allowedPermissionLevels[string(permissionLevel)]; !ok { return fmt.Errorf(`permission_level %s is not supported with %s objects; allowed levels: %s`, - permissionLevel, mapping.field, strings.Join(mapping.getAllowedPermissionLevels(true), ", ")) + permissionLevel, mapping.field, strings.Join(mapping.GetAllowedPermissionLevels(true), ", ")) } } return nil }, Read: func(ctx context.Context, d *schema.ResourceData, c *common.DatabricksClient) error { a := NewPermissionsAPI(ctx, c) - mapping, err := getResourcePermissionsFromId(d.Id()) + mapping, err := GetResourcePermissionsFromId(d.Id()) if err != nil { return err } @@ -293,14 +293,14 @@ func ResourcePermissions() common.Resource { Update: func(ctx context.Context, d *schema.ResourceData, c *common.DatabricksClient) error { var entity entity.PermissionsEntity common.DataToStructPointer(d, s, &entity) - mapping, err := getResourcePermissionsFromId(d.Id()) + mapping, err := GetResourcePermissionsFromId(d.Id()) if err != nil { return err } return NewPermissionsAPI(ctx, c).Update(d.Id(), entity, mapping) }, Delete: func(ctx context.Context, d *schema.ResourceData, c *common.DatabricksClient) error { - mapping, err := getResourcePermissionsFromId(d.Id()) + mapping, err := GetResourcePermissionsFromId(d.Id()) if err != nil { return err } diff --git a/permissions/resource_permissions_test.go b/permissions/resource_permissions_test.go index ffcc1923fc..08fc3e5be5 100644 --- a/permissions/resource_permissions_test.go +++ b/permissions/resource_permissions_test.go @@ -1329,7 +1329,7 @@ func TestResourcePermissionsUpdate(t *testing.T) { assert.Equal(t, "CAN_VIEW", firstElem["permission_level"]) } -func getResourcePermissions(field, objectType string) resourcePermissions { +func getResourcePermissions(field, objectType string) WorkspaceObjectPermissions { for _, mapping := range allResourcePermissions() { if mapping.field == field && mapping.objectType == objectType { return mapping