Draw Image X Y Circle

Python cv2.circle() Examples

The following are 30 code examples for showing how to use cv2.circle() . These examples are extracted from open source projects. You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example.

You may check out the related API usage on the sidebar.

You may also want to check out all available functions/classes of the module cv2 , or try the search function .

Example 1

def update(_=None):
    """Re-fit a line to noisy sample points and redraw the 'fit line' demo window.

    Reads the current trackbar settings (noise level, point count, outlier
    percentage), generates points along the reference segment plus random
    outliers, fits a line with the currently selected cv2 distance function,
    and draws everything.  Relies on module-level w, h, toint, sample_line,
    cur_func_name and draw_str.
    """
    noise = cv2.getTrackbarPos('noise', 'fit line')
    n = cv2.getTrackbarPos('point n', 'fit line')
    r = cv2.getTrackbarPos('outlier %', 'fit line') / 100.0
    outn = int(n*r)  # number of points that will be outliers

    p0, p1 = (90, 80), (w-90, h-80)
    img = np.zeros((h, w, 3), np.uint8)
    # ground-truth segment in green
    cv2.line(img, toint(p0), toint(p1), (0, 255, 0))

    if n > 0:
        line_points = sample_line(p0, p1, n-outn, noise)
        outliers = np.random.rand(outn, 2) * (w, h)
        points = np.vstack([line_points, outliers])
        for p in line_points:
            cv2.circle(img, toint(p), 2, (255, 255, 255), -1)
        for p in outliers:
            cv2.circle(img, toint(p), 2, (64, 64, 255), -1)
        # look up the selected distance type (e.g. cv2.DIST_L2) by name
        func = getattr(cv2, cur_func_name)
        vx, vy, cx, cy = cv2.fitLine(np.float32(points), func, 0, 0.01, 0.01)
        # extend the fitted line well past both image edges before clipping
        cv2.line(img, (int(cx-vx*w), int(cy-vy*w)), (int(cx+vx*w), int(cy+vy*w)), (0, 0, 255))

    draw_str(img, (20, 20), cur_func_name)
    cv2.imshow('fit line', img)

Example 2

def ProcessFrame(self, frame):
    """Segment the arm region in *frame*, detect fingers, and return an
    annotated BGR image with the finger count drawn on it."""
    # segment arm region
    segment = self.SegmentArm(frame)

    # make a copy of the segmented image to draw on
    draw = cv2.cvtColor(segment, cv2.COLOR_GRAY2RGB)

    # draw some helpers for correctly placing hand
    cv2.circle(draw, (self.imgWidth/2, self.imgHeight/2), 3, [255, 102, 0], 2)
    cv2.rectangle(draw, (self.imgWidth/3, self.imgHeight/3),
                  (self.imgWidth*2/3, self.imgHeight*2/3), [255, 102, 0], 2)

    # find the hull of the segmented area, and based on that find the
    # convexity defects
    [contours, defects] = self.FindHullDefects(segment)

    # detect the number of fingers depending on the contours and convexity defects
    # draw defects that belong to fingers green, others red
    [nofingers, draw] = self.DetectNumberFingers(contours, defects, draw)

    # print number of fingers on image
    cv2.putText(draw, str(nofingers), (30, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255))
    return draw

Example 3

def hulk(x):
    """Given an Nx3 matrix of hulk positions and size,
    create N img_size x img_size images, each with a hulk drawn on
    them given by the value in each row of x.

    One row of x = [x, y, radius].
    Relies on module-level img_size and skimage.draw.
    """
    y = np.zeros((x.shape[0], img_size, img_size))
    for i, particle in enumerate(x):
        # clamp the radius to at least 1 pixel so every row draws something
        rr, cc = skimage.draw.circle(
            particle[0], particle[1], max(particle[2], 1), shape=(img_size, img_size)
        )
        y[i, rr, cc] = 1
    return y

Example 4

def update(self, radarData):
    """Redraw the radar display from *radarData* track entries.

    radarData is assumed to be a dict with keys like '<n>_track_range' and
    '<n>_track_angle' for track numbers 1..64 — TODO confirm against caller.
    """
    self.img = np.zeros((self.height, self.width, self.channels), np.uint8)
    # field-of-view guide lines
    cv2.line(self.img, (10, 0), (self.width/2 - 5, self.height), (100, 255, 255))
    cv2.line(self.img, (self.width - 10, 0), (self.width/2 + 5, self.height), (100, 255, 255))

    for track_number in range(1, 65):
        if str(track_number)+'_track_range' in radarData:
            track_range = radarData[str(track_number)+'_track_range']
            # +90 degrees rotates the radar frame so 0 deg points up
            track_angle = (float(radarData[str(track_number)+'_track_angle'])+90.0)*math.pi/180

            x_pos = math.cos(track_angle)*track_range*4
            y_pos = math.sin(track_angle)*track_range*4

            cv2.circle(self.img, (self.width/2 + int(x_pos), self.height - int(y_pos) - 10),
                       5, (255, 255, 255))
            #cv2.putText(self.img, str(track_number),
            #    (self.width/2 + int(x_pos)-2, self.height - int(y_pos) - 10), self.font, 1, (255,255,255), 2)

    cv2.imshow("Radar", self.img)
    cv2.waitKey(2)

Example 5

def update(self, radarData):
    """Redraw the radar display from *radarData* track entries (variant of the
    previous example; same layout and scaling)."""
    self.img = np.zeros((self.height, self.width, self.channels), np.uint8)
    # field-of-view guide lines
    cv2.line(self.img, (10, 0), (self.width/2 - 5, self.height), (100, 255, 255))
    cv2.line(self.img, (self.width - 10, 0), (self.width/2 + 5, self.height), (100, 255, 255))

    for track_number in range(1, 65):
        if str(track_number)+'_track_range' in radarData:
            track_range = radarData[str(track_number)+'_track_range']
            # +90 degrees rotates the radar frame so 0 deg points up
            track_angle = (float(radarData[str(track_number)+'_track_angle'])+90.0)*math.pi/180

            x_pos = math.cos(track_angle)*track_range*4
            y_pos = math.sin(track_angle)*track_range*4

            cv2.circle(self.img, (self.width/2 + int(x_pos), self.height - int(y_pos) - 10),
                       5, (255, 255, 255))
            #cv2.putText(self.img, str(track_number),
            #    (self.width/2 + int(x_pos)-2, self.height - int(y_pos) - 10), self.font, 1, (255,255,255), 2)

    cv2.imshow("Radar", self.img)
    cv2.waitKey(2)

Example 6

def mark_hand_center(frame_in, cont):
    """Find the point inside contour *cont* that is farthest from the contour
    edge (approximate palm center) and mark it on *frame_in*.

    Returns (frame_in, center_point, max_distance, thresh_score) where
    thresh_score is True when the inscribed radius exceeds
    radius_thresh * frame height.  Python 2 code (uses xrange).
    """
    max_d = 0
    pt = (0, 0)
    x, y, w, h = cv2.boundingRect(cont)
    for ind_y in xrange(int(y+0.3*h), int(y+0.8*h)):  # around 0.25 to 0.6 region of height (faster calculation with ok results)
        for ind_x in xrange(int(x+0.3*w), int(x+0.6*w)):  # around 0.3 to 0.6 region of width (faster calculation with ok results)
            # signed distance to the contour; positive means inside
            dist = cv2.pointPolygonTest(cont, (ind_x, ind_y), True)
            if dist > max_d:
                max_d = dist
                pt = (ind_x, ind_y)
    if max_d > radius_thresh*frame_in.shape[1]:
        thresh_score = True
        cv2.circle(frame_in, pt, int(max_d), (255, 0, 0), 2)
    else:
        thresh_score = False
    return frame_in, pt, max_d, thresh_score

# 6. Find and display gesture

Example 7

def draw_humans(npimg, humans, imgcopy=False):
    """Draw detected body-part keypoints and skeleton lines for each human
    onto *npimg* (in place unless imgcopy=True). Returns the annotated image.
    Relies on the module-level `common` definitions (CocoPart, CocoColors,
    CocoPairsRender)."""
    if imgcopy:
        npimg = np.copy(npimg)
    image_h, image_w = npimg.shape[:2]
    centers = {}
    for human in humans:
        # draw point
        for i in range(common.CocoPart.Background.value):
            if i not in human.body_parts.keys():
                continue

            body_part = human.body_parts[i]
            # body_part coordinates are normalized; +0.5 rounds to nearest pixel
            center = (int(body_part.x * image_w + 0.5), int(body_part.y * image_h + 0.5))
            centers[i] = center
            cv2.circle(npimg, center, 3, common.CocoColors[i], thickness=3, lineType=8, shift=0)

        # draw line
        for pair_order, pair in enumerate(common.CocoPairsRender):
            if pair[0] not in human.body_parts.keys() or pair[1] not in human.body_parts.keys():
                continue

            npimg = cv2.line(npimg, centers[pair[0]], centers[pair[1]], common.CocoColors[pair_order], 3)

    return npimg

Example 8

def draw_limbs(image, pose_2d, visible):
    """Draw the 2D pose without the occluded/not visible joints.

    pose_2d: per-person joint coordinates as (y, x) pairs — note the
    row/column order when unpacking. visible: per-person boolean joint mask.
    Relies on module-level NORMALISATION_COEFFICIENT, JOINT_DRAW_SIZE and
    LIMB_DRAW_SIZE.
    """
    _COLORS = [
        [0, 0, 255], [0, 170, 255], [0, 255, 170], [0, 255, 0],
        [170, 255, 0], [255, 170, 0], [255, 0, 0], [255, 0, 170],
        [170, 0, 255]
    ]
    # pairs of joint indices forming each limb
    _LIMBS = np.array([0, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 9,
                       9, 10, 11, 12, 12, 13]).reshape((-1, 2))

    # scale drawing sizes with image area
    _NORMALISATION_FACTOR = int(math.floor(math.sqrt(image.shape[0] * image.shape[1] / NORMALISATION_COEFFICIENT)))

    for oid in range(pose_2d.shape[0]):
        for lid, (p0, p1) in enumerate(_LIMBS):
            # skip limbs with either endpoint occluded
            if not (visible[oid][p0] and visible[oid][p1]):
                continue
            y0, x0 = pose_2d[oid][p0]
            y1, x1 = pose_2d[oid][p1]
            cv2.circle(image, (x0, y0), JOINT_DRAW_SIZE * _NORMALISATION_FACTOR, _COLORS[lid], -1)
            cv2.circle(image, (x1, y1), JOINT_DRAW_SIZE * _NORMALISATION_FACTOR, _COLORS[lid], -1)
            cv2.line(image, (x0, y0), (x1, y1),
                     _COLORS[lid], LIMB_DRAW_SIZE * _NORMALISATION_FACTOR, 16)

Example 9

def plot_3d_pts(img, pts, center, calib_file=None, cam_to_img=None, relative=False, constraint_idx=None):
    """Project 3D points into the image plane and draw them as filled dots.

    If *relative* is True, each point is first offset by *center*.
    *calib_file* (when given) overrides *cam_to_img* with the calibration
    loaded from disk.  Relies on module-level project_3d_pt, cv_colors and
    constraint_to_color.
    """
    if calib_file is not None:
        cam_to_img = get_calibration_cam_to_image(calib_file)

    for pt in pts:
        if relative:
            pt = [i + center[j] for j, i in enumerate(pt)]  # more pythonic

        point = project_3d_pt(pt, cam_to_img)

        color = cv_colors.RED.value

        if constraint_idx is not None:
            color = constraint_to_color(constraint_idx)

        cv2.circle(img, (point[0], point[1]), 3, color, thickness=-1)

Example 10

def annotate(self, img):
    """Paint keypoint annotations (and short motion trails) on *img*.

    Keypoints tracked for >= 5 frames are bright green, shorter tracks dark
    green, untracked points black.  Relies on module-level myjet colormap.
    """
    # paint annotations on the image
    for i1 in range(len(self.kpus)):
        u1, v1 = int(round(self.kpus[i1][0])), int(round(self.kpus[i1][1]))
        if self.pts[i1] is not None:
            if len(self.pts[i1].frames) >= 5:
                cv2.circle(img, (u1, v1), color=(0, 255, 0), radius=3)
            else:
                cv2.circle(img, (u1, v1), color=(0, 128, 0), radius=3)
            # draw the trail
            pts = []
            lfid = None
            # walk the last 9 frames of this track, newest first
            for f, idx in zip(self.pts[i1].frames[-9:][::-1], self.pts[i1].idxs[-9:][::-1]):
                # stop at the first gap in consecutive frame ids
                if lfid is not None and lfid-1 != f.id:
                    break
                pts.append(tuple(map(lambda x: int(round(x)), f.kpus[idx])))
                lfid = f.id
            if len(pts) >= 2:
                cv2.polylines(img, np.array([pts], dtype=np.int32), False,
                              myjet[len(pts)]*255, thickness=1, lineType=16)
        else:
            cv2.circle(img, (u1, v1), color=(0, 0, 0), radius=3)
    return img
    # inverse of intrinsics matrix

Example 11

def load_shapes(self, count, height, width):
    """Generate the requested number of synthetic images.
    count: number of images to generate.
    height, width: the size of the generated images.
    """
    # Add classes
    self.add_class("shapes", 1, "square")
    self.add_class("shapes", 2, "circle")
    self.add_class("shapes", 3, "triangle")

    # Add images
    # Generate random specifications of images (i.e. color and
    # list of shapes sizes and locations). This is more compact than
    # actual images. Images are generated on the fly in load_image().
    for i in range(count):
        bg_color, shapes = self.random_image(height, width)
        self.add_image("shapes", image_id=i, path=None,
                       width=width, height=height,
                       bg_color=bg_color, shapes=shapes)

Example 12

def draw_shape(self, image, shape, dims, color):
    """Draws a shape from the given specs."""
    # Get the center x, y and the size s
    x, y, s = dims
    if shape == 'square':
        image = cv2.rectangle(image, (x - s, y - s),
                              (x + s, y + s), color, -1)
    elif shape == "circle":
        image = cv2.circle(image, (x, y), s, color, -1)
    elif shape == "triangle":
        # equilateral triangle centered on (x, y) with "radius" s
        points = np.array([[(x, y - s),
                            (x - s / math.sin(math.radians(60)), y + s),
                            (x + s / math.sin(math.radians(60)), y + s),
                            ]], dtype=np.int32)
        image = cv2.fillPoly(image, points, color)
    return image

Example 13

def random_shape(self, height, width):
    """Generates specifications of a random shape that lies within
    the given height and width boundaries.
    Returns a tuple of 3 values:
    * The shape name (square, circle, ...)
    * Shape color: a tuple of 3 values, RGB.
    * Shape dimensions: A tuple of values that define the shape size
                        and location. Differs per shape type.
    """
    # Shape
    shape = random.choice(["square", "circle", "triangle"])
    # Color
    color = tuple([random.randint(0, 255) for _ in range(3)])
    # Center x, y — keep a 20px buffer from every edge
    buffer = 20
    y = random.randint(buffer, height - buffer - 1)
    x = random.randint(buffer, width - buffer - 1)
    # Size
    s = random.randint(buffer, height // 4)
    return shape, color, (x, y, s)

Example 14

def drawMatch(img0, img1, src, tgt, color='b'):
    """Stack img0 above img1 and draw matched point pairs (src in img0,
    tgt in img1) connected by lines.  color='b' draws blue (BGR), anything
    else green.  Returns the combined image."""
    if len(img0.shape) == 2:
        img0 = np.expand_dims(img0, 2)
    if len(img1.shape) == 2:
        img1 = np.expand_dims(img1, 2)
    h, w = img0.shape[0], img0.shape[1]
    # canvas holding both images stacked vertically
    img = np.zeros([2*h, w, 3])
    img[:h, :, :] = img0
    img[h:, :, :] = img1
    n = len(src)
    if color == 'b':
        color = (255, 0, 0)
    else:
        color = (0, 255, 0)
    for i in range(n):
        cv2.circle(img, (int(src[i, 0]), int(src[i, 1])), 3, color, -1)
        # tgt points live in the lower image, hence the +h offset
        cv2.circle(img, (int(tgt[i, 0]), int(tgt[i, 1])+h), 3, color, -1)
        cv2.line(img, (int(src[i, 0]), int(src[i, 1])),
                 (int(tgt[i, 0]), int(tgt[i, 1])+h), color, 1)
    return img

Example 15

def visualize_joints(bone_list, focus):
    """Perspective-project skeleton bones onto a 424x600 canvas and draw them.

    Each entry of bone_list holds two 3D joint endpoints; +400 shifts z so the
    camera does not divide by zero.  Relies on module-level inside_image.
    NOTE(review): m.astype(np.uint8) discards its result, so m stays float64 —
    kept as in the original.
    """
    m = np.zeros((424, 600, 3))
    m.astype(np.uint8)
    for bone in bone_list:
        p1x = bone[0][0]
        p1y = bone[0][1]
        p1z = bone[0][2] + 400
        p2x = bone[1][0]
        p2y = bone[1][1]
        p2z = bone[1][2] + 400
        # pinhole projection; (300, 204) recenters on the canvas
        p1 = (
         int(p1x * focus / p1z + 300.0), int(-p1y * focus / p1z + 204.0))
        p2 = (int(p2x * focus / p2z + 300.0), int(-p2y * focus / p2z + 204.0))
        if inside_image(p1[0], p1[1]) and inside_image(p2[0], p2[1]):
            cv.line(m, p1, p2, (255, 0, 0), 2)
            cv.circle(m, p1, 2, (0, 255, 255), -1)
            cv.circle(m, p2, 2, (0, 255, 255), -1)

    return m

Example 16

def visualize_joints2(bone_list, focus):
    """Variant of visualize_joints: project skeleton bones onto a 424x600
    canvas and draw them (same projection and drawing parameters)."""
    m = np.zeros((424, 600, 3))
    m.astype(np.uint8)
    for bone in bone_list:
        p1x = bone[0][0]
        p1y = bone[0][1]
        p1z = bone[0][2] + 400
        p2x = bone[1][0]
        p2y = bone[1][1]
        p2z = bone[1][2] + 400
        # pinhole projection; (300, 204) recenters on the canvas
        p1 = (
         int(p1x * focus / p1z + 300.0), int(-p1y * focus / p1z + 204.0))
        p2 = (int(p2x * focus / p2z + 300.0), int(-p2y * focus / p2z + 204.0))
        if inside_image(p1[0], p1[1]) and inside_image(p2[0], p2[1]):
            cv.line(m, p1, p2, (255, 0, 0), 2)
            cv.circle(m, p1, 2, (0, 255, 255), -1)
            cv.circle(m, p2, 2, (0, 255, 255), -1)

    return m

Example 17

def plot_kpt(image, kpt):
    ''' Draw 68 key points
    Args:
        image: the input image
        kpt: (68, 3).
    '''
    image = image.copy()
    kpt = np.round(kpt).astype(np.int32)
    for i in range(kpt.shape[0]):
        st = kpt[i, :2]
        image = cv2.circle(image, (st[0], st[1]), 1, (0, 0, 255), 2)
        # end_list marks the last point of each facial contour; don't
        # connect it to the first point of the next contour
        if i in end_list:
            continue
        ed = kpt[i + 1, :2]
        image = cv2.line(image, (st[0], st[1]), (ed[0], ed[1]), (255, 255, 255), 1)
    return image

Example 18

def add_coco_hp(image, points, color):
    """Draw the 17 COCO keypoints and limb 'sticks' for one person on *image*
    and return it.  Relies on module-level _kp_connections (joint index pairs).
    """
    for j in range(17):
        cv2.circle(image,
                   (points[j, 0], points[j, 1]), 2,
                   (int(color[0]), int(color[1]), int(color[2])), -1)

    stickwidth = 2
    cur_canvas = image.copy()
    for j, e in enumerate(_kp_connections):
        # only draw limbs whose two keypoints are both detected (> 0)
        if points[e].min() > 0:
            X = [points[e[0], 1], points[e[1], 1]]
            Y = [points[e[0], 0], points[e[1], 0]]
            mX = np.mean(X)
            mY = np.mean(Y)
            length = ((X[0] - X[1]) ** 2 + (Y[0] - Y[1]) ** 2) ** 0.5
            angle = math.degrees(math.atan2(X[0] - X[1], Y[0] - Y[1]))
            # draw the limb as a filled rotated ellipse, then blend 50/50
            polygon = cv2.ellipse2Poly((int(mY), int(mX)), (int(length/2), stickwidth),
                                       int(angle), 0, 360, 1)
            cv2.fillConvexPoly(cur_canvas, polygon,
                               (int(color[0]), int(color[1]), int(color[2])))
            image = cv2.addWeighted(image, 0.5, cur_canvas, 0.5, 0)

    return image

Example 19

def test_image(image_path, model_path):
    """Run CenterFace detection on a single image and display the boxes and
    five facial landmarks per face."""
    frame = cv2.imread(image_path)
    h, w = frame.shape[:2]
    landmarks = True
    centerface = CenterFace(model_path=model_path, landmarks=landmarks)
    centerface.transform(h, w)
    if landmarks:
        dets, lms = centerface(frame, threshold=0.35)
    else:
        dets = centerface(frame, threshold=0.35)

    for det in dets:
        boxes, score = det[:4], det[4]
        cv2.rectangle(frame, (int(boxes[0]), int(boxes[1])),
                      (int(boxes[2]), int(boxes[3])), (2, 255, 0), 1)
    if landmarks:
        # each lm holds 5 (x, y) landmark pairs flattened to 10 values
        for lm in lms:
            cv2.circle(frame, (int(lm[0]), int(lm[1])), 2, (0, 0, 255), -1)
            cv2.circle(frame, (int(lm[2]), int(lm[3])), 2, (0, 0, 255), -1)
            cv2.circle(frame, (int(lm[4]), int(lm[5])), 2, (0, 0, 255), -1)
            cv2.circle(frame, (int(lm[6]), int(lm[7])), 2, (0, 0, 255), -1)
            cv2.circle(frame, (int(lm[8]), int(lm[9])), 2, (0, 0, 255), -1)
    cv2.imshow('out', frame)
    cv2.waitKey(0)

Example 20

def plot_kpt(image, kpt):
    ''' Draw 68 key points
    Args:
        image: the input image
        kpt: (68, 3).
    '''
    image = image.copy()
    kpt = np.round(kpt).astype(np.int32)
    for i in range(kpt.shape[0]):
        st = kpt[i, :2]
        image = cv2.circle(image, (st[0], st[1]), 1, (0, 0, 255), 2)
        # end_list marks contour endpoints; don't bridge into the next contour
        if i in end_list:
            continue
        ed = kpt[i + 1, :2]
        image = cv2.line(image, (st[0], st[1]), (ed[0], ed[1]), (255, 255, 255), 1)
    return image

Example 21

def apply_keypoint(image, keypoint, num_joints=17):
    """Draw COCO-style keypoints and skeleton edges on *image* and return it.

    keypoint is assumed to be an array of rows (x, y, confidence); joints and
    edges are drawn only when confidence > 0.
    """
    image = image.astype(np.uint8)

    # COCO skeleton: pairs of joint indices to connect
    edges = [[0, 1], [0, 2], [1, 3], [2, 4],
             [3, 5], [4, 6], [5, 6],
             [5, 7], [7, 9], [6, 8], [8, 10],
             [5, 11], [6, 12], [11, 12],
             [11, 13], [13, 15], [12, 14], [14, 16]]

    for j in range(num_joints):
        if keypoint[j][2] > 0.:
            cv2.circle(image,
                       (keypoint[j, 0], keypoint[j, 1]), 3, (255, 255, 255), 2)

    stickwidth = 2
    for j, e in enumerate(edges):
        if keypoint[e[0], 2] > 0. and keypoint[e[1], 2] > 0.:
            centerA = keypoint[e[0], :2]
            centerB = keypoint[e[1], :2]
            cv2.line(image, (centerA[0], centerA[1]),
                     (centerB[0], centerB[1]), (255, 255, 255), 2)
    return image

Example 22

def main():
    """Test code"""
    global mp
    mp = np.array((2, 1), np.float32)  # measurement

    def onmouse(k, x, y, s, p):
        # mouse position becomes the new measurement
        global mp
        mp = np.array([[np.float32(x)], [np.float32(y)]])

    cv2.namedWindow("kalman")
    cv2.setMouseCallback("kalman", onmouse)
    kalman = Stabilizer(4, 2)
    frame = np.zeros((480, 640, 3), np.uint8)  # drawing canvas

    while True:
        kalman.update(mp)
        point = kalman.prediction
        state = kalman.filter.statePost
        # blue: filter state, green: prediction
        cv2.circle(frame, (state[0], state[1]), 2, (255, 0, 0), -1)
        cv2.circle(frame, (point[0], point[1]), 2, (0, 255, 0), -1)
        cv2.imshow("kalman", frame)
        k = cv2.waitKey(30) & 0xFF
        if k == 27:  # ESC quits
            break

Example 23

def __getitem__(self, idx):
    """Render one trajectory sample as a sequence of images with balls drawn
    as filled circles; returns (input, output[, positions])."""
    # traj size: (n_frames, n_balls, 4)
    traj = self.dataset[idx]
    vid_len, n_balls = traj.shape[:2]
    if self.is_train:
        # random temporal crop during training
        start = random.randint(0, vid_len - self.n_frames)
    else:
        start = 0

    n_channels = 1
    images = np.zeros([self.n_frames, self.size, self.size, n_channels], np.uint8)
    positions = []
    for fid in range(self.n_frames):
        xy = []
        for bid in range(n_balls):
            # each ball:
            ball = traj[start + fid, bid]
            x, y = int(round(self.scale * ball[0])), int(round(self.scale * ball[1]))
            images[fid] = cv2.circle(images[fid], (x, y), int(self.radius * ball[3]),
                                     255, -1)
            # positions normalized to [0, 1]
            xy.append([x / self.size, y / self.size])
        positions.append(xy)

    if self.transform is not None:
        images = self.transform(images)

    input = images[:self.n_frames_input]
    if self.n_frames_output > 0:
        output = images[self.n_frames_input:]
    else:
        output = []

    if not self.return_positions:
        return input, output
    else:
        positions = np.array(positions)
        return input, output, positions

Example 24

def circle(self, x, y, radius, label=None):
    """Draw a circle.

    Parameters
    ----------
    x : float | int
        Center of the circle (x-axis).
    y : float | int
        Center of the circle (y-axis).
    radius : float | int
        Radius of the circle in pixels.
    label : Optional[str]
        A text label that is placed at the center of the circle.

    """
    image_size = int(radius + self.thickness + 1.5)  # actually half size
    roi = int(x - image_size), int(y - image_size), \
        int(2 * image_size), int(2 * image_size)
    if not is_in_bounds(self.image, roi):
        return

    # draw into a view of the ROI so coordinates are ROI-local
    image = view_roi(self.image, roi)
    center = image.shape[1] // 2, image.shape[0] // 2
    cv2.circle(
        image, center, int(radius + .5), self._color, self.thickness)
    if label is not None:
        cv2.putText(
            self.image, label, center, cv2.FONT_HERSHEY_PLAIN,
            2, self.text_color, 2)

Example 25

def showTransform(image):  # pragma: no cover
    """Display *image* with the module-level pts1 transform points marked
    as filled green circles (non-destructive: draws on a copy)."""
    im = image.copy()
    for (cx, cy) in pts1:
        cv2.circle(im, (int(cx), int(cy)), 8, (0, 255, 0), -1)
    imshow(im, name="transform")

Example 26

def object_track(self, img, conf_th=0.3, conf_class=[]):
    """Detect objects in *img*, update the centroid tracker, and annotate
    tracked person centroids with their IDs.  Returns the annotated image.
    NOTE(review): mutable default conf_class=[] kept to preserve the original
    signature; it is only passed through, never mutated here.
    """
    output = self.detector.prediction(img)
    df = self.detector.filter_prediction(output, img, conf_th=conf_th, conf_class=conf_class)
    img = self.detector.draw_boxes(img, df)
    boxes = df[['x1', 'y1', 'x2', 'y2']].values
    objects = self.ct.update(boxes)
    if len(boxes) > 0 and (df['class_name'].str.contains('person').any()):
        for (objectID, centroid) in objects.items():
            text = "ID {}".format(objectID)
            cv2.putText(img, text, (centroid[0] - 10, centroid[1] - 10),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
            cv2.circle(img, (centroid[0], centroid[1]), 4, (0, 255, 0), -1)
    return img

Example 27

def ObjectTracking(self):
    """Continuously capture frames from the Pi camera, track people with a
    CentroidTracker, and save an annotated snapshot whenever a new track ID
    appears.  Runs until interrupted."""
    detector = Detector()
    myiter = glob.iglob(os.path.join(IMAGE_FOLDER, '**', '*.jpg'),
                        recursive=True)
    # rebuild the last-used ID from previously saved images
    newdict = reduce(lambda a, b: reduce_tracking(a, b), myiter, dict())
    startID = max(map(int, newdict.keys()), default=0) + 1
    ct = CentroidTracker(startID=startID)
    with PiCamera() as camera:
        camera.resolution = (1280, 960)  # twice height and width
        camera.rotation = int(str(os.environ['CAMERA_ROTATION']))
        camera.framerate = 10
        with PiRGBArray(camera, size=(WIDTH, HEIGHT)) as output:
            while True:
                camera.capture(output, 'bgr', resize=(WIDTH, HEIGHT))
                img = output.array
                result = detector.prediction(img)
                df = detector.filter_prediction(result, img)
                img = detector.draw_boxes(img, df)
                boxes = df[['x1', 'y1', 'x2', 'y2']].values
                previous_object_ID = ct.nextObjectID
                objects = ct.update(boxes)
                # only save when a person is present AND a brand-new ID was assigned
                if len(boxes) > 0 and (df['class_name'].str.contains('person').any()) and previous_object_ID in list(objects.keys()):
                    for (objectID, centroid) in objects.items():
                        text = "ID {}".format(objectID)
                        cv2.putText(img, text, (centroid[0] - 10, centroid[1] - 10),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
                        cv2.circle(img, (centroid[0], centroid[1]), 4, (0, 255, 0), -1)
                    day = datetime.now().strftime("%Y%m%d")
                    directory = os.path.join(IMAGE_FOLDER, 'pi', day)
                    if not os.path.exists(directory):
                        os.makedirs(directory)
                    ids = "-".join(list([str(i) for i in objects.keys()]))
                    hour = datetime.now().strftime("%H%M%S")
                    filename_output = os.path.join(
                            directory, "{}_person_{}_.jpg".format(hour, ids)
                            )
                    cv2.imwrite(filename_output, img)
                time.sleep(0.300)

Example 28

def object_track(self, img, conf_th=0.3, conf_class=[]):
    """Detect objects in *img*, update the centroid tracker, and annotate
    tracked person centroids with their IDs (DataFrame-based variant).
    NOTE(review): mutable default conf_class=[] kept to preserve the original
    signature; it is only passed through, never mutated here.
    """
    output = self.detector.prediction(img)
    df = self.detector.filter_prediction(output, img, conf_th=conf_th, conf_class=conf_class)
    img = self.detector.draw_boxes(img, df)
    boxes = df[['x1', 'y1', 'x2', 'y2']].values
    objects = self.ct.update(boxes)
    if len(boxes) > 0 and (df['class_name'].str.contains('person').any()):
        for (objectID, centroid) in objects.items():
            text = "ID {}".format(objectID)
            cv2.putText(img, text, (centroid[0] - 10, centroid[1] - 10),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
            cv2.circle(img, (centroid[0], centroid[1]), 4, (0, 255, 0), -1)
    return img

Example 29

def object_track(self, img, conf_th=0.3, conf_class=[]):
    """Detect objects in *img*, update the centroid tracker, and annotate
    tracked centroids with IDs when a person class (1) is present.
    NOTE(review): mutable default conf_class=[] kept to preserve the original
    signature; it is only passed through, never mutated here.
    """
    output = self.detector.prediction(img)
    boxes, confs, clss = self.detector.filter_prediction(output, img, conf_th=conf_th, conf_class=conf_class)
    img = self.detector.draw_boxes(img, boxes, confs, clss)
    objects = self.ct.update(boxes)
    # class id 1 is assumed to be 'person' — TODO confirm against the detector's label map
    if len(boxes) > 0 and 1 in clss:
        for (objectID, centroid) in objects.items():
            text = "ID {}".format(objectID)
            cv2.putText(img, text, (centroid[0] - 10, centroid[1] - 10),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
            cv2.circle(img, (centroid[0], centroid[1]), 4, (0, 255, 0), -1)
    return img

Example 30

def draw_datches(img1, kp1, img2, kp2, matches, color=None, kp_radius=5,
                 thickness=2, margin=20):
    """Place img1 and img2 side by side and draw each match as a line with a
    circle at both keypoints.  When *color* is falsy, a random color is picked
    per match.  Returns the combined image.
    NOTE(review): in the grayscale branch c is a scalar, so c[0] would raise —
    behavior kept from the original; pass an explicit color for gray images.
    """
    # Create frame
    if len(img1.shape) == 3:
        new_shape = (max(img1.shape[0], img2.shape[0]),
                     img1.shape[1]+img2.shape[1]+margin,
                     img1.shape[2])
    elif len(img1.shape) == 2:
        new_shape = (max(img1.shape[0],
                     img2.shape[0]),
                     img1.shape[1]+img2.shape[1]+margin)
    new_img = np.ones(new_shape, type(img1.flat[0]))*255

    # Place original images
    new_img[0:img1.shape[0], 0:img1.shape[1]] = img1
    new_img[0:img2.shape[0],
            img1.shape[1]+margin:img1.shape[1]+img2.shape[1]+margin] = img2

    # Draw lines between matches
    if color:
        c = color
    for m in matches:
        # Generate random color for RGB/BGR and grayscale images as needed.
        if not color:
            if len(img1.shape) == 3:
                c = np.random.randint(0, 256, 3)
            else:
                c = np.random.randint(0, 256)
            c = (int(c[0]), int(c[1]), int(c[2]))

        end1 = tuple(np.round(kp1[m.trainIdx].pt).astype(int))
        # shift the second keypoint into img2's half of the canvas
        end2 = tuple(np.round(kp2[m.queryIdx].pt).astype(int)
                     + np.array([img1.shape[1]+margin, 0]))
        cv2.line(new_img, end1, end2, c, thickness, lineType=cv2.LINE_AA)
        cv2.circle(new_img, end1, kp_radius, c, thickness, lineType=cv2.LINE_AA)
        cv2.circle(new_img, end2, kp_radius, c, thickness, lineType=cv2.LINE_AA)
    return new_img

hyltonfiette.blogspot.com

Source: https://www.programcreek.com/python/example/84097/cv2.circle

Belum ada Komentar untuk "Draw Image X Y Circle"

Posting Komentar

Iklan Atas Artikel

Iklan Tengah Artikel 1

Iklan Tengah Artikel 2

Iklan Bawah Artikel