I'm facing issues with misaligned text extraction from images. I suspect the problem lies in formatting rather than extraction. Can I utilize bounding box coordinates to improve text alignment? See the original image attached: the price column starts from the same horizontal position in every row.

Here is the code:
def extract_text(self, image):
    """Extract text from *image*, grouped into horizontal sections.

    The image is converted to grayscale, its connected components are
    bucketed into row bands (every ``section_threshold`` pixels of each
    component's top ``y``), and each band is OCR'd separately so that
    words on the same visual line stay together.

    Args:
        image: BGR image (as loaded by ``cv2.imread``).

    Returns:
        A list of ``(extracted_text, boxes)`` tuples, one per section,
        ordered top-to-bottom. ``boxes`` is the list returned by
        :meth:`get_text_boxes` for that section.
    """
    gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    text_regions = []
    retval, labels, stats, centroids = cv2.connectedComponentsWithStats(gray_image)

    # Bucket components into horizontal bands keyed by y // threshold.
    section_threshold = 20
    section_groups = {}
    for i in range(1, retval):  # label 0 is the background component
        x, y, w, h, area = stats[i]
        section_groups.setdefault(y // section_threshold, []).append((x, y, w, h))

    # Iterate bands top-to-bottom so the output order is deterministic.
    for section_key in sorted(section_groups):
        components = section_groups[section_key]
        # BUG FIX: the original sliced gray_image using components[0] and
        # components[-1], which assumes the components in a band are sorted
        # top-to-bottom. connectedComponentsWithStats gives no such ordering,
        # so the slice could come out empty or truncated. Use the actual
        # vertical extent of all components in the band instead.
        top = min(y for _, y, _, _ in components)
        bottom = max(y + h for _, y, _, h in components)
        section_image = gray_image[top:bottom, :]
        extracted_text, section_boxes = self._extract_text_and_boxes(section_image)
        text_regions.append((extracted_text, section_boxes))
    return text_regions

def _extract_text_and_boxes(self, component):
    """Run OCR on *component* and return ``(text, bounding_boxes)``."""
    extracted_text = pytesseract.image_to_string(component, config=self.config)
    boxes = self.get_text_boxes(component)
    return extracted_text, boxes

def get_text_boxes(self, image):
    """Return word-level bounding boxes for *image*.

    Uses ``pytesseract.image_to_data`` and keeps only entries with a
    positive confidence score, filtering out layout elements and empty
    detections (Tesseract reports those with conf == -1).

    Returns:
        A list of ``(x, y, w, h)`` tuples, one per detected word.
    """
    results = pytesseract.image_to_data(image, output_type=Output.DICT)
    boxes = []
    for i in range(len(results["text"])):
        conf = float(results["conf"][i])
        if conf > 0:
            boxes.append(
                (
                    results["left"][i],
                    results["top"][i],
                    results["width"][i],
                    results["height"][i],
                )
            )
    return boxes