diff --git a/pkg/cluster/cluster.go b/pkg/cluster/cluster.go
index e75002a7f015fa117009c68dcb57750c5d0f398d..b9759ca86509b0e9b7f8bd7025bba500e7af389d 100644
--- a/pkg/cluster/cluster.go
+++ b/pkg/cluster/cluster.go
@@ -342,7 +342,7 @@ func (c *Cluster) compareStatefulSetWith(statefulSet *v1beta1.StatefulSet) *comp
 	if c.Statefulset.Spec.Template.Spec.ServiceAccountName != statefulSet.Spec.Template.Spec.ServiceAccountName {
 		needsReplace = true
 		needsRollUpdate = true
-		reasons = append(reasons, "new statefulset's serviceAccountName service asccount name doesn't match the current one")
+		reasons = append(reasons, "new statefulset's serviceAccountName doesn't match the current one")
 	}
 	if *c.Statefulset.Spec.Template.Spec.TerminationGracePeriodSeconds != *statefulSet.Spec.Template.Spec.TerminationGracePeriodSeconds {
 		needsReplace = true
@@ -462,16 +462,16 @@ func (c *Cluster) compareContainers(description string, setA, setB []v1.Containe
 func compareResources(a *v1.ResourceRequirements, b *v1.ResourceRequirements) bool {
 	equal := true
 	if a != nil {
-		equal = compareResoucesAssumeFirstNotNil(a, b)
+		equal = compareResourcesAssumeFirstNotNil(a, b)
 	}
 	if equal && (b != nil) {
-		equal = compareResoucesAssumeFirstNotNil(b, a)
+		equal = compareResourcesAssumeFirstNotNil(b, a)
 	}
 
 	return equal
 }
 
-func compareResoucesAssumeFirstNotNil(a *v1.ResourceRequirements, b *v1.ResourceRequirements) bool {
+func compareResourcesAssumeFirstNotNil(a *v1.ResourceRequirements, b *v1.ResourceRequirements) bool {
 	if b == nil || (len(b.Requests) == 0) {
 		return len(a.Requests) == 0
 	}
@@ -875,7 +875,7 @@ func (c *Cluster) initInfrastructureRoles() error {
 	return nil
 }
 
-// resolves naming conflicts between existing and new roles by chosing either of them.
+// resolves naming conflicts between existing and new roles by choosing either of them.
 func (c *Cluster) resolveNameConflict(currentRole, newRole *spec.PgUser) spec.PgUser {
 	var result spec.PgUser
 	if newRole.Origin >= currentRole.Origin {
@@ -969,7 +969,7 @@ func (c *Cluster) Switchover(curMaster *v1.Pod, candidate spec.NamespacedName) e
 	// signal the role label waiting goroutine to close the shop and go home
 	close(stopCh)
 	// wait until the goroutine terminates, since unregisterPodSubscriber
-	// must be called before the outer return; otherwsise we risk subscribing to the same pod twice.
+	// must be called before the outer return; otherwise we risk subscribing to the same pod twice.
 	wg.Wait()
 	// close the label waiting channel no sooner than the waiting goroutine terminates.
 	close(podLabelErr)
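
The compareResources / compareResourcesAssumeFirstNotNil pair renamed above implements a symmetric, nil-safe comparison: the one-way check is run in both directions so that a resource request present on only one side is detected. As a rough, self-contained illustration of that pattern (using a simplified, hypothetical Resources type instead of v1.ResourceRequirements, and comparing only requests), a sketch might look like:

package main

import "fmt"

// Resources is a simplified stand-in for v1.ResourceRequirements,
// used only for this sketch.
type Resources struct {
	Requests map[string]string
}

// compareOneWay assumes a is non-nil and checks that every request in a
// is present in b with the same value.
func compareOneWay(a, b *Resources) bool {
	if b == nil || len(b.Requests) == 0 {
		return len(a.Requests) == 0
	}
	for name, want := range a.Requests {
		if got, ok := b.Requests[name]; !ok || got != want {
			return false
		}
	}
	return true
}

// compare runs the one-way check in both directions, so that a request
// present on only one side is reported as a difference.
func compare(a, b *Resources) bool {
	equal := true
	if a != nil {
		equal = compareOneWay(a, b)
	}
	if equal && b != nil {
		equal = compareOneWay(b, a)
	}
	return equal
}

func main() {
	a := &Resources{Requests: map[string]string{"cpu": "100m"}}
	b := &Resources{Requests: map[string]string{"cpu": "100m", "memory": "1Gi"}}
	fmt.Println(compare(a, a)) // true
	fmt.Println(compare(a, b)) // false: memory request exists on only one side
}
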
diff --git a/pkg/cluster/k8sres.go b/pkg/cluster/k8sres.go
index aefa0404acf3df4f970d70162462765f152bf4d1..a16e810ed4f256cafe2851b858aab630acc0fc53 100644
--- a/pkg/cluster/k8sres.go
+++ b/pkg/cluster/k8sres.go
@@ -329,7 +329,7 @@ func tolerations(tolerationsSpec *[]v1.Toleration, podToleration map[string]stri
 	return []v1.Toleration{}
 }
 
-// isBootstrapOnlyParameter checks asgainst special Patroni bootstrap parameters.
+// isBootstrapOnlyParameter checks against special Patroni bootstrap parameters.
 // Those parameters must go to the bootstrap/dcs/postgresql/parameters section.
 // See http://patroni.readthedocs.io/en/latest/dynamic_configuration.html.
 func isBootstrapOnlyParameter(param string) bool {
@@ -1366,7 +1366,7 @@ func (c *Cluster) generateLogicalBackupJob() (*batchv1beta1.CronJob, error) {
 		return nil, fmt.Errorf("could not generate pod template for logical backup pod: %v", err)
 	}
 
-	// overwrite specifc params of logical backups pods
+	// overwrite specific params of logical backup pods
 	podTemplate.Spec.Affinity = &podAffinity
 	podTemplate.Spec.RestartPolicy = "Never" // affects containers within a pod
 
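
The doc comment fixed above explains that parameters matched by isBootstrapOnlyParameter belong in Patroni's bootstrap/dcs/postgresql/parameters section rather than in the regular postgresql parameters. A minimal sketch of that routing, assuming a hypothetical hard-coded subset of parameter names rather than the operator's real list, could be:

package main

import "fmt"

// bootstrapOnly is a hypothetical stand-in for the parameter names the
// real isBootstrapOnlyParameter recognizes; it is only an illustrative subset.
var bootstrapOnly = map[string]bool{
	"max_connections":      true,
	"max_worker_processes": true,
	"wal_level":            true,
}

func isBootstrapOnlyParameter(param string) bool {
	return bootstrapOnly[param]
}

// splitParameters routes each user-supplied parameter either into the
// bootstrap/dcs/postgresql/parameters section or into the regular
// postgresql parameters, mirroring the behaviour the comment describes.
func splitParameters(params map[string]string) (bootstrap, regular map[string]string) {
	bootstrap = make(map[string]string)
	regular = make(map[string]string)
	for name, value := range params {
		if isBootstrapOnlyParameter(name) {
			bootstrap[name] = value
		} else {
			regular[name] = value
		}
	}
	return bootstrap, regular
}

func main() {
	b, r := splitParameters(map[string]string{
		"max_connections": "100",
		"log_statement":   "all",
	})
	fmt.Println(b) // map[max_connections:100]
	fmt.Println(r) // map[log_statement:all]
}
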
diff --git a/pkg/cluster/resources.go b/pkg/cluster/resources.go
index e8674283ad1fc59881a764ce73f1b248484abd9d..c89edac63417d10d2f422fb39e6d6a0c6fb44d74 100644
--- a/pkg/cluster/resources.go
+++ b/pkg/cluster/resources.go
@@ -361,7 +361,7 @@ func (c *Cluster) updateService(role PostgresRole, newService *v1.Service) error
 	// TODO: check if it possible to change the service type with a patch in future versions of Kubernetes
 	if newService.Spec.Type != c.Services[role].Spec.Type {
 		// service type has changed, need to replace the service completely.
-		// we cannot use just pach the current service, since it may contain attributes incompatible with the new type.
+		// we cannot just patch the current service, since it may contain attributes incompatible with the new type.
 		var (
 			currentEndpoint *v1.Endpoints
 			err             error
@@ -369,7 +369,7 @@ func (c *Cluster) updateService(role PostgresRole, newService *v1.Service) error
 
 		if role == Master {
 			// for the master service we need to re-create the endpoint as well. Get the up-to-date version of
-			// the addresses stored in it before the service is deleted (deletion of the service removes the endpooint)
+			// the addresses stored in it before the service is deleted (deletion of the service removes the endpoint)
 			currentEndpoint, err = c.KubeClient.Endpoints(c.Namespace).Get(c.endpointName(role), metav1.GetOptions{})
 			if err != nil {
 				return fmt.Errorf("could not get current cluster %s endpoints: %v", role, err)