How to fix the following tkinter code in PyCharm

Hi, I am testing sign-language alphabet recognition (BISINDO) with YOLOv8, using tkinter for the desktop UI and the PyCharm IDE. When I run the code below from PyCharm, the open_image function fails and the error shown below appears. If I run the same code from the Visual Studio Code IDE, the error does not occur, even though bus.jpg is not present there either.

(deteksi-abjad-bisindo) PS D:\SKRIPSI\deteksi-abjad-bisindo> python .\main.py
pygame 2.4.0 (SDL 2.26.4, Python 3.11.4)
Hello from the pygame community. https://www.pygame.org/contribute.html
WARNING  'source' is missing. Using 'source=https://ultralytics.com/images/bus.jpg'.

Found https:\ultralytics.com\images\bus.jpg locally at bus.jpg
Exception in Tkinter callback
Traceback (most recent call last):
  File "C:\Users\User\AppData\Local\Programs\Python\Python311\Lib\tkinter\__init__.py", line 1948, in __call__
    return self.func(*args)
           ^^^^^^^^^^^^^^^^
  File "D:\SKRIPSI\deteksi-abjad-bisindo\identifying.py", line 151, in open_image
    results = model(image)
              ^^^^^^^^^^^^
  File "C:\Users\User\.virtualenvs\deteksi-abjad-bisindo\Lib\site-packages\ultralytics\yolo\engine\model.py", line 111, in __call__
    return self.predict(source, stream, **kwargs)
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "C:\Users\User\.virtualenvs\deteksi-abjad-bisindo\Lib\site-packages\torch\utils\_contextlib.py", line 115, in decorate_context
    return func(*args, **kwargs)
           ^^^^^^^^^^^^^^^^^^^^^
  File "C:\Users\User\.virtualenvs\deteksi-abjad-bisindo\Lib\site-packages\ultralytics\yolo\engine\model.py", line 255, in predict
  File "C:\Users\User\.virtualenvs\deteksi-abjad-bisindo\Lib\site-packages\ultralytics\yolo\data\dataloaders\stream_loaders.py", line 224, in __next__        
    raise FileNotFoundError(f'Image Not Found {path}')
FileNotFoundError: Image Not Found D:\SKRIPSI\deteksi-abjad-bisindo\bus.jpg
(deteksi-abjad-bisindo) PS D:\SKRIPSI\deteksi-abjad-bisindo> python .\identifying.py
pygame 2.4.0 (SDL 2.26.4, Python 3.11.4)
Hello from the pygame community. https://www.pygame.org/contribute.html
WARNING  'source' is missing. Using 'source=https://ultralytics.com/images/bus.jpg'.

Found https:\ultralytics.com\images\bus.jpg locally at bus.jpg
Exception in Tkinter callback
Traceback (most recent call last):
  File "C:\Users\User\AppData\Local\Programs\Python\Python311\Lib\tkinter\__init__.py", line 1948, in __call__
    return self.func(*args)
           ^^^^^^^^^^^^^^^^
  File "D:\SKRIPSI\deteksi-abjad-bisindo\identifying.py", line 151, in open_image
    results = model(image)
              ^^^^^^^^^^^^
  File "C:\Users\User\.virtualenvs\deteksi-abjad-bisindo\Lib\site-packages\ultralytics\yolo\engine\model.py", line 111, in __call__
    return self.predict(source, stream, **kwargs)
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "C:\Users\User\.virtualenvs\deteksi-abjad-bisindo\Lib\site-packages\torch\utils\_contextlib.py", line 115, in decorate_context
    return func(*args, **kwargs)
           ^^^^^^^^^^^^^^^^^^^^^
  File "C:\Users\User\.virtualenvs\deteksi-abjad-bisindo\Lib\site-packages\ultralytics\yolo\engine\model.py", line 255, in predict
    return self.predictor.predict_cli(source=source) if is_cli else self.predictor(source=source, stream=stream)
                                                                    ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "C:\Users\User\.virtualenvs\deteksi-abjad-bisindo\Lib\site-packages\ultralytics\yolo\engine\predictor.py", line 188, in __call__
    return list(self.stream_inference(source, model))  # merge list of Result into one
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "C:\Users\User\.virtualenvs\deteksi-abjad-bisindo\Lib\site-packages\ultralytics\yolo\data\dataloaders\stream_loaders.py", line 224, in __next__        
    raise FileNotFoundError(f'Image Not Found {path}')
FileNotFoundError: Image Not Found D:\SKRIPSI\deteksi-abjad-bisindo\bus.jpg
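
Reading the traceback: the line `WARNING  'source' is missing. Using 'source=https://ultralytics.com/images/bus.jpg'` means Ultralytics received no valid source, so my guess (not confirmed) is that `cv2.imread(image_path)` returns `None` inside `open_image`, and YOLO then falls back to its default `bus.jpg`, which is not in my project folder. A minimal diagnostic sketch to test that guess; `diagnose_image_read` is just a name I made up for illustration:

```python
import os
from pathlib import Path

import cv2


def diagnose_image_read(image_path: str) -> None:
    # PyCharm and VS Code may launch the script with different working directories
    print("working directory:", os.getcwd())
    print("selected path exists:", Path(image_path).exists())

    # cv2.imread() returns None instead of raising when it cannot read the file,
    # e.g. when the path contains non-ASCII characters on Windows
    image = cv2.imread(image_path)
    if image is None:
        print("cv2.imread returned None for:", image_path)
    else:
        print("image loaded, shape:", image.shape)
```

If `cv2.imread` really returns `None` here, then `model(None)` would explain exactly the warning and the `bus.jpg` lookup shown above.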

**Program code**

import cv2
import tkinter as tk
from pathlib import Path
from tkinter import Label, filedialog
from ultralytics import YOLO
from PIL import Image, ImageTk
from ultralytics.yolo.utils.plotting import Annotator
from gtts import gTTS
from tkinter import messagebox
import pygame
import os

ASSETS_PATH = Path(__file__).resolve().parent / "assets/images"

model = YOLO('models/best.pt')

window = tk.Tk()
window.title('Deteksi BISINDO')
window.config(bg='#4c1d95')

screen_width = window.winfo_screenwidth()
screen_height = window.winfo_screenheight()

window_width = 800
window_height = 680

x = int((screen_width / 2) - (window_width / 2))
y = int((screen_height / 2) - (window_height / 2))

window.geometry(f'{window_width}x{window_height}+{x}+{y}')

label_judul = tk.Label(window, fg='#FFF', width=540, font=(
    "Open Sans", 14, "bold"), text="Deteksi Bahasa Isyarat Indonesia (BISINDO)", bg='#4c1d95')
label_judul.pack(ipadx=5, ipady=5, pady=10)

# Create a label for the initial image
initial_image_label = Label(window, bg='#4c1d95')
initial_image_label.place(x=85, y=140, width=640, height=480)
original_image_path = ASSETS_PATH / "bisindo.png"


def reset_image():
    initial_image = Image.open(original_image_path)
    initial_image = initial_image.resize((640, 480), Image.Resampling.LANCZOS)
    initial_image_tk = ImageTk.PhotoImage(initial_image)
    initial_image_label.config(image=initial_image_tk)
    initial_image_label.image = initial_image_tk


def open_video():
    video_path = filedialog.askopenfilename(
        title="Select Video", filetypes=[("Video files", "*.mp4")])

    if video_path:
        cap = cv2.VideoCapture(video_path)
        frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
        aspect_ratio = frame_width / frame_height

        if aspect_ratio > 1:
            output_width = 640
            output_height = int(output_width / aspect_ratio)
        else:
            output_height = 640
            output_width = int(output_height * aspect_ratio)

        while cap.isOpened():
            success, frame = cap.read()

            if success:
                resized_frame = cv2.resize(
                    frame, (output_width, output_height))

                results = model(resized_frame)

                annotated_frame = results[0].plot()

                cv2.imshow("Detection by Video", annotated_frame)

                if cv2.waitKey(1) & 0xFF == ord("q"):
                    break
            else:
                break

        cap.release()
        cv2.destroyAllWindows()


def perform_object_detection(frame):
    results = model.predict(frame, verbose=False)
    annotator = Annotator(frame)
    detected_objects = []

    for r in results:
        boxes = r.boxes
        for box in boxes:
            b = box.xyxy[0]
            c = box.cls
            annotator.box_label(b, model.names[int(c)])
            detected_objects.append(model.names[int(c)])

    annotated_frame = annotator.result()
    annotated_image = results[0].plot()

    if detected_objects:
        text_to_speech = ", ".join(detected_objects)

        try:
            tts = gTTS(text=text_to_speech, lang='id')
            tts.save("output.mp3")
            play_speech()
        except Exception:
            # Ignore TTS/playback errors (e.g. no internet connection for gTTS)
            pass

    return annotated_frame, annotated_image


def open_camera():
    reset_image()  # Clear the previous prediction image before starting camera mode
    cap = cv2.VideoCapture(0)
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)

    frame_rate = 30
    delay = int(1000 / frame_rate)

    while True:
        ret, frame = cap.read()
        if not ret:
            break

        annotated_frame, annotated_image = perform_object_detection(frame)

        cv2.imshow('Detection Realtime', annotated_image)

        if cv2.waitKey(delay) & 0xFF == ord('q'):
            break

    cap.release()
    cv2.destroyAllWindows()

    reset_image()  # Clear the prediction image after exiting camera mode


def open_image():
    image_path = filedialog.askopenfilename(title="Select Image", filetypes=[
                                            ("Image files", "*.jpg;*.jpeg;*.png")])

    if image_path:
        image = cv2.imread(image_path)
        results = model(image)
        annotated_image = results[0].plot()
        resized_image = cv2.resize(annotated_image, (640, 480))
        pil_image = Image.fromarray(
            cv2.cvtColor(resized_image, cv2.COLOR_BGR2RGB))
        tk_image = ImageTk.PhotoImage(pil_image)

        # Update the initial image label with the selected image
        initial_image_label.config(image=tk_image)
        initial_image_label.image = tk_image

        detected_objects = []

        for r in results:
            boxes = r.boxes
            for box in boxes:
                c = box.cls
                detected_objects.append(model.names[int(c)])

        if detected_objects:
            text_to_speech = ", ".join(detected_objects)
        else:
            text_to_speech = "Objek tidak terdeteksi"

        tts = gTTS(text=text_to_speech, lang='id')
        tts.save("output.mp3")

        # Validation
        last_folder_name = os.path.basename(os.path.dirname(image_path))
        prediction = ", ".join(detected_objects)

        if prediction == last_folder_name:
            result_message = f"Hasil Identifikasi BENAR! {prediction}"
            result_icon = "info"
        else:
            result_message = f"Hasil Identifikasi SALAH! {prediction}"
            result_icon = "error"

        # Update the initial image label
        initial_image_label.update()

        # Play speech
        play_speech()

        # Show message box with prediction result
        messagebox.showinfo("Hasil Identifikasi Abjad BISINDO",
                            result_message, icon=result_icon)

        # Bind 'q' key to clear_image() function
        initial_image_label.bind('<KeyPress-q>', clear_image)

        # Set focus to initial_image_label to capture key events
        initial_image_label.focus_set()


def clear_image(event=None):
    # The '<KeyPress-q>' binding passes an event object, so accept it as an optional argument
    reset_image()


def play_speech():
    pygame.mixer.init()
    pygame.mixer.music.load("output.mp3")
    pygame.mixer.music.play()

    while pygame.mixer.music.get_busy():
        pygame.time.Clock().tick(10)

    pygame.mixer.music.stop()
    pygame.mixer.music.unload()

    os.remove("output.mp3")


frame = tk.Frame(window, bg='#F2B33D')

btn_video = tk.PhotoImage(file=ASSETS_PATH / "detect-video.png")
video_button = tk.Button(
    image=btn_video, borderwidth=0, highlightthickness=0, bg='#4c1d95', activebackground='#4c1d95',
    command=open_video, relief="solid")
video_button.place(x=70, y=70, width=203, height=39)

btn_camera = tk.PhotoImage(file=ASSETS_PATH / "detect-realtime.png")
camera_button = tk.Button(
    image=btn_camera, borderwidth=0, highlightthickness=0, bg='#4c1d95', activebackground='#4c1d95',
    command=open_camera, relief="solid")
camera_button.place(x=310, y=70, width=203, height=39)

btn_image = tk.PhotoImage(file=ASSETS_PATH / "detect-image.png")
image_button = tk.Button(
    image=btn_image, borderwidth=0, highlightthickness=0, bg='#4c1d95', activebackground='#4c1d95',
    command=open_image, relief="solid")
image_button.place(x=540, y=70, width=203, height=39)

# Set initial image
reset_image()

copyright_text = tk.Label(
    text="© 2023 Agung Ma'ruf • 1911500518",
    bg="#4C1D95", fg="white", justify="left",
    font=("Open Sans", 8, "italic"))
copyright_text.place(x=325.0, y=635.0)

window.resizable(False, False)
frame.pack(expand=True)
window.mainloop()

Is there any way to make this work in the PyCharm IDE?
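
In case the cause really is `cv2.imread()` returning `None` (for example because the selected path contains non-ASCII characters, or because PyCharm starts the script from a different working directory), this is a guard I am considering adding. `load_image_safely` is a hypothetical helper, and the `np.fromfile`/`cv2.imdecode` fallback is a common workaround for non-ASCII paths on Windows, not something already in my code; it reuses the `cv2` and `messagebox` imports from the top of the script:

```python
import numpy as np  # pulled in anyway as a dependency of OpenCV/Ultralytics


def load_image_safely(image_path):
    """Read an image even if the path contains non-ASCII characters (Windows)."""
    image = cv2.imread(image_path)
    if image is None:
        # cv2.imread() cannot handle some Windows paths; decode from raw bytes instead
        data = np.fromfile(image_path, dtype=np.uint8)
        image = cv2.imdecode(data, cv2.IMREAD_COLOR)
    if image is None:
        # Still unreadable: warn the user instead of passing None into model()
        messagebox.showerror("Error", f"Could not read image: {image_path}")
    return image
```

In `open_image` I would then call `image = load_image_safely(image_path)` and return early if it is still `None`, so that `model()` is never called without a source.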
