Skip to content

fix kafka + add kafka_schema_registry attribute #623

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 1 commit into from
Apr 22, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
15 changes: 10 additions & 5 deletions ovh/data_cloud_project_database.go
Original file line number Diff line number Diff line change
Expand Up @@ -135,6 +135,11 @@ func dataSourceCloudProjectDatabase() *schema.Resource {
Description: "Defines whether the REST API is enabled on a Kafka cluster",
Computed: true,
},
"kafka_schema_registry": {
Type: schema.TypeBool,
Description: "Defines whether the schema registry is enabled on a Kafka cluster",
Computed: true,
},
"maintenance_time": {
Type: schema.TypeString,
Description: "Time on which maintenances can start every day",
Expand Down Expand Up @@ -232,16 +237,16 @@ func dataSourceCloudProjectDatabaseRead(ctx context.Context, d *schema.ResourceD
nodesEndpoint := fmt.Sprintf("%s/node", serviceEndpoint)
nodeList := &[]string{}
if err := config.OVHClient.GetWithContext(ctx, nodesEndpoint, nodeList); err != nil {
return diag.Errorf("unable to get database %s nodes: %v", res.Id, err)
return diag.Errorf("unable to get database %s nodes: %v", res.ID, err)
}

if len(*nodeList) == 0 {
return diag.Errorf("no node found for database %s", res.Id)
return diag.Errorf("no node found for database %s", res.ID)
}
nodeEndpoint := fmt.Sprintf("%s/%s", nodesEndpoint, url.PathEscape((*nodeList)[0]))
node := &CloudProjectDatabaseNodes{}
if err := config.OVHClient.GetWithContext(ctx, nodeEndpoint, node); err != nil {
return diag.Errorf("unable to get database %s node %s: %v", res.Id, (*nodeList)[0], err)
return diag.Errorf("unable to get database %s node %s: %v", res.ID, (*nodeList)[0], err)
}

res.Region = node.Region
Expand All @@ -250,12 +255,12 @@ func dataSourceCloudProjectDatabaseRead(ctx context.Context, d *schema.ResourceD
advancedConfigEndpoint := fmt.Sprintf("%s/advancedConfiguration", serviceEndpoint)
advancedConfigMap := &map[string]string{}
if err := config.OVHClient.GetWithContext(ctx, advancedConfigEndpoint, advancedConfigMap); err != nil {
return diag.Errorf("unable to get database %s advanced configuration: %v", res.Id, err)
return diag.Errorf("unable to get database %s advanced configuration: %v", res.ID, err)
}
res.AdvancedConfiguration = *advancedConfigMap
}

for k, v := range res.ToMap() {
for k, v := range res.toMap() {
if k != "id" {
d.Set(k, v)
} else {
Expand Down
45 changes: 27 additions & 18 deletions ovh/resource_cloud_project_database.go
Original file line number Diff line number Diff line change
Expand Up @@ -85,6 +85,14 @@ func resourceCloudProjectDatabase() *schema.Resource {
return d.Get("engine").(string) != "kafka" || new == old
},
},
"kafka_schema_registry": {
Type: schema.TypeBool,
Description: "Defines whether the schema registry is enabled on a Kafka cluster",
Optional: true,
DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool {
return d.Get("engine").(string) != "kafka" || new == old
},
},
"nodes": {
Type: schema.TypeList,
Description: "List of nodes composing the service",
Expand Down Expand Up @@ -245,15 +253,15 @@ func resourceCloudProjectDatabase() *schema.Resource {
}

func resourceCloudProjectDatabaseImportState(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) {
givenId := d.Id()
givenID := d.Id()
n := 3
splitId := strings.SplitN(givenId, "/", n)
if len(splitId) != n {
splitID := strings.SplitN(givenID, "/", n)
if len(splitID) != n {
return nil, fmt.Errorf("import Id is not service_name/engine/databaseId formatted")
}
serviceName := splitId[0]
engine := splitId[1]
id := splitId[2]
serviceName := splitID[0]
engine := splitID[1]
id := splitID[2]
d.SetId(id)
d.Set("engine", engine)
d.Set("service_name", serviceName)
Expand All @@ -272,7 +280,7 @@ func resourceCloudProjectDatabaseCreate(ctx context.Context, d *schema.ResourceD
url.PathEscape(serviceName),
url.PathEscape(engine),
)
params, err := (&CloudProjectDatabaseCreateOpts{}).FromResource(d)
params, err := (&CloudProjectDatabaseCreateOpts{}).fromResource(d)
if err != nil {
return diag.Errorf("service creation failed : %q", err)
}
Expand All @@ -284,17 +292,18 @@ func resourceCloudProjectDatabaseCreate(ctx context.Context, d *schema.ResourceD
return diag.Errorf("calling Post %s with params %+v:\n\t %q", endpoint, params, err)
}

log.Printf("[DEBUG] Waiting for database %s to be READY", res.Id)
err = waitForCloudProjectDatabaseReady(ctx, config.OVHClient, serviceName, engine, res.Id, d.Timeout(schema.TimeoutCreate))
log.Printf("[DEBUG] Waiting for database %s to be READY", res.ID)
err = waitForCloudProjectDatabaseReady(ctx, config.OVHClient, serviceName, engine, res.ID, d.Timeout(schema.TimeoutCreate))
if err != nil {
return diag.Errorf("timeout while waiting database %s to be READY: %s", res.Id, err.Error())
return diag.Errorf("timeout while waiting database %s to be READY: %s", res.ID, err.Error())
}
log.Printf("[DEBUG] database %s is READY", res.Id)
log.Printf("[DEBUG] database %s is READY", res.ID)

d.SetId(res.Id)
d.SetId(res.ID)

if (engine != "mongodb" && len(d.Get("advanced_configuration").(map[string]interface{})) > 0) ||
(engine == "kafka" && d.Get("kafka_rest_api").(bool)) ||
(engine == "kafka" && d.Get("kafka_schema_registry").(bool)) ||
(engine == "opensearch" && d.Get("opensearch_acls_enabled").(bool)) {
return resourceCloudProjectDatabaseUpdate(ctx, d, meta)
}
Expand Down Expand Up @@ -322,16 +331,16 @@ func resourceCloudProjectDatabaseRead(ctx context.Context, d *schema.ResourceDat
nodesEndpoint := fmt.Sprintf("%s/node", serviceEndpoint)
nodeList := &[]string{}
if err := config.OVHClient.GetWithContext(ctx, nodesEndpoint, nodeList); err != nil {
return diag.Errorf("unable to get database %s nodes: %v", res.Id, err)
return diag.Errorf("unable to get database %s nodes: %v", res.ID, err)
}

if len(*nodeList) == 0 {
return diag.Errorf("no node found for database %s", res.Id)
return diag.Errorf("no node found for database %s", res.ID)
}
nodeEndpoint := fmt.Sprintf("%s/%s", nodesEndpoint, url.PathEscape((*nodeList)[0]))
node := &CloudProjectDatabaseNodes{}
if err := config.OVHClient.GetWithContext(ctx, nodeEndpoint, node); err != nil {
return diag.Errorf("unable to get database %s node %s: %v", res.Id, (*nodeList)[0], err)
return diag.Errorf("unable to get database %s node %s: %v", res.ID, (*nodeList)[0], err)
}

res.Region = node.Region
Expand All @@ -340,12 +349,12 @@ func resourceCloudProjectDatabaseRead(ctx context.Context, d *schema.ResourceDat
advancedConfigEndpoint := fmt.Sprintf("%s/advancedConfiguration", serviceEndpoint)
advancedConfigMap := &map[string]string{}
if err := config.OVHClient.GetWithContext(ctx, advancedConfigEndpoint, advancedConfigMap); err != nil {
return diag.Errorf("unable to get database %s advanced configuration: %v", res.Id, err)
return diag.Errorf("unable to get database %s advanced configuration: %v", res.ID, err)
}
res.AdvancedConfiguration = *advancedConfigMap
}

for k, v := range res.ToMap() {
for k, v := range res.toMap() {
if k != "id" {
d.Set(k, v)
} else {
Expand All @@ -366,7 +375,7 @@ func resourceCloudProjectDatabaseUpdate(ctx context.Context, d *schema.ResourceD
url.PathEscape(engine),
url.PathEscape(d.Id()),
)
params, err := (&CloudProjectDatabaseUpdateOpts{}).FromResource(d)
params, err := (&CloudProjectDatabaseUpdateOpts{}).fromResource(d)
if err != nil {
return diag.Errorf("service update failed : %q", err)
}
Expand Down
93 changes: 50 additions & 43 deletions ovh/types_cloud_project_database.go
Original file line number Diff line number Diff line change
Expand Up @@ -67,69 +67,67 @@ type CloudProjectDatabaseResponse struct {
Description string `json:"description"`
Endpoints []CloudProjectDatabaseEndpoint `json:"endpoints"`
Flavor string `json:"flavor"`
Id string `json:"id"`
ID string `json:"id"`
IPRestrictions []CloudProjectDatabaseIPRestrictionResponse `json:"ipRestrictions"`
MaintenanceTime string `json:"maintenanceTime"`
NetworkId string `json:"networkId"`
NetworkID string `json:"networkId"`
NetworkType string `json:"networkType"`
Plan string `json:"plan"`
NodeNumber int `json:"nodeNumber"`
Region string `json:"region"`
RestApi bool `json:"restApi"`
RestAPI bool `json:"restApi"`
SchemaRegistry bool `json:"schemaRegistry"`
Status string `json:"status"`
SubnetId string `json:"subnetId"`
SubnetID string `json:"subnetId"`
Version string `json:"version"`
Disk CloudProjectDatabaseDisk `json:"disk"`
AdvancedConfiguration map[string]string `json:"advancedConfiguration"`
}

func (s *CloudProjectDatabaseResponse) String() string {
return fmt.Sprintf("%s(%s): %s", s.Description, s.Id, s.Status)
}

func (v CloudProjectDatabaseResponse) ToMap() map[string]interface{} {
func (r CloudProjectDatabaseResponse) toMap() map[string]interface{} {
obj := make(map[string]interface{})
obj["backup_regions"] = v.Backups.Regions
obj["backup_time"] = v.Backups.Time
obj["created_at"] = v.CreatedAt
obj["description"] = v.Description
obj["id"] = v.Id
obj["backup_regions"] = r.Backups.Regions
obj["backup_time"] = r.Backups.Time
obj["created_at"] = r.CreatedAt
obj["description"] = r.Description
obj["id"] = r.ID

var ipRests []map[string]interface{}
for _, ir := range v.IPRestrictions {
for _, ir := range r.IPRestrictions {
ipRests = append(ipRests, ir.toMap())
}
obj["ip_restrictions"] = ipRests

var endpoints []map[string]interface{}
for _, e := range v.Endpoints {
for _, e := range r.Endpoints {
endpoints = append(endpoints, e.ToMap())
}
obj["endpoints"] = endpoints

obj["flavor"] = v.Flavor
obj["kafka_rest_api"] = v.RestApi
obj["maintenance_time"] = v.MaintenanceTime
obj["network_type"] = v.NetworkType
obj["flavor"] = r.Flavor
obj["kafka_rest_api"] = r.RestAPI
obj["kafka_schema_registry"] = r.SchemaRegistry
obj["maintenance_time"] = r.MaintenanceTime
obj["network_type"] = r.NetworkType

var nodes []map[string]interface{}
for i := 0; i < v.NodeNumber; i++ {
for i := 0; i < r.NodeNumber; i++ {
node := CloudProjectDatabaseNodes{
Region: v.Region,
NetworkId: v.NetworkId,
SubnetId: v.SubnetId,
Region: r.Region,
NetworkId: r.NetworkID,
SubnetId: r.SubnetID,
}
nodes = append(nodes, node.ToMap())
}
obj["nodes"] = nodes

obj["opensearch_acls_enabled"] = v.AclsEnabled
obj["plan"] = v.Plan
obj["status"] = v.Status
obj["version"] = v.Version
obj["disk_size"] = v.Disk.Size
obj["disk_type"] = v.Disk.Type
obj["advanced_configuration"] = v.AdvancedConfiguration
obj["opensearch_acls_enabled"] = r.AclsEnabled
obj["plan"] = r.Plan
obj["status"] = r.Status
obj["version"] = r.Version
obj["disk_size"] = r.Disk.Size
obj["disk_type"] = r.Disk.Type
obj["advanced_configuration"] = r.AdvancedConfiguration

return obj
}
Expand Down Expand Up @@ -185,7 +183,7 @@ func (v CloudProjectDatabaseNodes) ToMap() map[string]interface{} {
}

type CloudProjectDatabaseCreateOpts struct {
Backups CloudProjectDatabaseBackups `json:"backups,omitempty"`
Backups *CloudProjectDatabaseBackups `json:"backups,omitempty"`
Description string `json:"description,omitempty"`
Disk CloudProjectDatabaseDisk `json:"disk,omitempty"`
IPRestrictions []CloudProjectDatabaseIPRestriction `json:"ipRestrictions,omitempty"`
Expand All @@ -212,7 +210,7 @@ type CloudProjectDatabaseNodesPattern struct {
Region string `json:"region"`
}

func (opts *CloudProjectDatabaseCreateOpts) FromResource(d *schema.ResourceData) (*CloudProjectDatabaseCreateOpts, error) {
func (opts *CloudProjectDatabaseCreateOpts) fromResource(d *schema.ResourceData) (*CloudProjectDatabaseCreateOpts, error) {
opts.Description = d.Get("description").(string)
opts.Plan = d.Get("plan").(string)

Expand Down Expand Up @@ -251,33 +249,39 @@ func (opts *CloudProjectDatabaseCreateOpts) FromResource(d *schema.ResourceData)
if err != nil {
return nil, err
}
time := d.Get("backup_time").(string)

opts.Backups = CloudProjectDatabaseBackups{
Regions: regions,
Time: d.Get("backup_time").(string),
if len(regions) != 0 || time != "" {
opts.Backups = &CloudProjectDatabaseBackups{
Regions: regions,
Time: time,
}
}

return opts, nil
}

type CloudProjectDatabaseUpdateOpts struct {
AclsEnabled bool `json:"aclsEnabled,omitempty"`
Backups CloudProjectDatabaseBackups `json:"backups,omitempty"`
Backups *CloudProjectDatabaseBackups `json:"backups,omitempty"`
Description string `json:"description,omitempty"`
Disk CloudProjectDatabaseDisk `json:"disk,omitempty"`
Flavor string `json:"flavor,omitempty"`
IPRestrictions []CloudProjectDatabaseIPRestriction `json:"ipRestrictions,omitempty"`
Plan string `json:"plan,omitempty"`
RestApi bool `json:"restApi,omitempty"`
RestAPI bool `json:"restApi,omitempty"`
SchemaRegistry bool `json:"schemaRegistry,omitempty"`
Version string `json:"version,omitempty"`
}

func (opts *CloudProjectDatabaseUpdateOpts) FromResource(d *schema.ResourceData) (*CloudProjectDatabaseUpdateOpts, error) {
func (opts *CloudProjectDatabaseUpdateOpts) fromResource(d *schema.ResourceData) (*CloudProjectDatabaseUpdateOpts, error) {
engine := d.Get("engine").(string)
if engine == "opensearch" {
opts.AclsEnabled = d.Get("opensearch_acls_enabled").(bool)
}
if engine == "kafka" {
opts.RestApi = d.Get("kafka_rest_api").(bool)
opts.RestAPI = d.Get("kafka_rest_api").(bool)
opts.SchemaRegistry = d.Get("kafka_schema_registry").(bool)
}

opts.Description = d.Get("description").(string)
Expand All @@ -301,10 +305,13 @@ func (opts *CloudProjectDatabaseUpdateOpts) FromResource(d *schema.ResourceData)
if err != nil {
return nil, err
}
time := d.Get("backup_time").(string)

opts.Backups = CloudProjectDatabaseBackups{
Regions: regions,
Time: d.Get("backup_time").(string),
if engine != "kafka" && (len(regions) != 0 || time != "") {
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

not the same check as in CloudProjectDatabaseCreateOpts.fromResource ? (no engine != "kafka" there)

Copy link
Contributor Author

@lpatte lpatte Apr 22, 2024

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The backups field is ignored for a lot of engines in the API (because the engine has no backup or the backup is not configurable) in POST and PUT. Kafka is the only engine where the schema of the PUT does not have backups fields. So this engine != "kafka" will ignore the backups attribute for Kafka on update; no need to add this in CloudProjectDatabaseCreateOpts.fromResource because our API already ignores this field.

opts.Backups = &CloudProjectDatabaseBackups{
Regions: regions,
Time: time,
}
}

return opts, nil
Expand Down
1 change: 1 addition & 0 deletions website/docs/d/cloud_project_database.html.markdown
Original file line number Diff line number Diff line change
Expand Up @@ -61,6 +61,7 @@ The following attributes are exported:
* `ip` - Authorized IP
* `status` - Current status of the IP restriction.
* `kafka_rest_api` - Defines whether the REST API is enabled on a kafka cluster.
* `kafka_schema_registry` - Defines whether the schema registry is enabled on a Kafka cluster.
* `maintenance_time` - Time on which maintenances can start every day.
* `network_type` - Type of network of the cluster.
* `nodes` - List of nodes object.
Expand Down
Loading