A VS Code extension that generates documentation using AI

There's this fairly new extension on the VS Code Marketplace called AI Doc Writer for JavaScript, TypeScript, and Python. Here's what the README says:

Step 1: Highlight code
Step 2: Click the Write Docs button (or hit ⌘ + .)

So I tested it to see how good it really is.

Here's how it went on some TypeScript code:

/**
 * return a set of all the subjects in the fullCoursesArray.
 * @returns A set of all the subjects.
 */
function getAllSubjects(): ReadonlySet<string> {
  const set = new Set<string>();
  fullCoursesArray.forEach(it => set.add(it.subject));
  return set;
}

/**
 * For each subject, if it has a color, do nothing. Otherwise, give it a random color from the
 * color set.
 * @param subjectColors - The subject colors that are currently in use.
 * @returns A new object with the same keys as the original object, but with new values.
 */
export function allocateAllSubjectColor(
  subjectColors: Record<string, string>
): Record<string, string> {
  const subjectsColorsCopy = { ...subjectColors };
  getAllSubjects().forEach(subject => {
    if (subjectsColorsCopy[subject]) return;
    subjectsColorsCopy[subject] = coursesColorSet[
      Math.floor(Math.random() * coursesColorSet.length)
    ].hex.substring(1);
  });
  return subjectsColorsCopy;
}
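
(Quick aside from me, not the extension: the snippet leans on two module-level values that aren't shown. Judging by the property accesses, declarations along these lines would make it compile — the exact types are my guess:)

// Hypothetical shapes for the two externals the snippet references;
// the real project defines these elsewhere.
declare const fullCoursesArray: readonly { subject: string }[];
declare const coursesColorSet: readonly { hex: string }[];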

/**
 * Update the subjectColors object with the new color for the given subject.
 * @param subjectColors - The current subject colors.
 * @param {string} color - The color to be applied to the subject.
 * @param {string} code - The subject code of the subject to update.
 * @returns A new object with the updated color.
 */
export function updateSubjectColor(
  subjectColors: Record<string, string>,
  color: string,
  code: string
): Record<string, string> {
  const subjectsColorsCopy = { ...subjectColors };
  getAllSubjects().forEach(subject => {
    if (subject === code) {
      subjectsColorsCopy[subject] = color;
    }
  });
  return subjectsColorsCopy;
}

/**
 * When the user clicks outside of the element, the `clickOutside` event handler is called.
 */
export const clickOutside = {
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  beforeMount(el: any, binding: any): void {
    el.clickOutsideEvent = (event: Event) => {
      if (!(el === event.target || el.contains(event.target))) {
        binding.value(event, el);
      }
    };
    document.body.addEventListener('click', el.clickOutsideEvent);
  },
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  unmounted(el: any): void {
    document.body.removeEventListener('click', el.clickOutsideEvent);
  },
};
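
(Also my addition, not the tool's output: beforeMount/unmounted are Vue 3 custom-directive hooks, so this object would be registered roughly like this — the import path is made up:)

import { createApp } from 'vue';
import App from './App.vue';
import { clickOutside } from './directives'; // hypothetical path

const app = createApp(App);
// Register globally so templates can use v-click-outside="handler",
// which calls handler(event, el) whenever a click lands outside el.
app.directive('click-outside', clickOutside);
app.mount('#app');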

I also tried it on some Python code:

import numpy as np
import requests


def collect_dataset():
    '''
    The function is used to collect the data from the github repository.


    :return: A matrix of the dataset.
    '''
    response = requests.get(
        "https://raw.githubusercontent.com/yashLadha/"
        + "The_Math_of_Intelligence/master/Week1/ADRvs"
        + "Rating.csv"
    )
    lines = response.text.splitlines()
    data = []
    for item in lines:
        item = item.split(",")
        data.append(item)
    data.pop(0)  # This is for removing the labels from the list
    dataset = np.matrix(data)
    return dataset


def run_steep_gradient_descent(data_x, data_y, len_data, alpha, theta):
    '''
    This function runs the gradient descent algorithm.

    :param data_x: the data matrix
    :param data_y: the actual y values
    :param len_data: the number of data points
    :param alpha: learning rate
    :param theta: The initial value of theta
    :return: Theta
    '''
    n = len_data

    prod = np.dot(theta, data_x.transpose())
    prod -= data_y.transpose()
    sum_grad = np.dot(prod, data_x)
    theta = theta - (alpha / n) * sum_grad
    return theta
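
(Sanity check from me, not the tool: the four lines of the body compute $\theta X^{\top} - y^{\top}$, multiply by $X$, and scale by $\alpha/n$ — i.e. the standard batch update

$$\theta \leftarrow \theta - \frac{\alpha}{n}\,(\theta X^{\top} - y^{\top})\,X$$

so the generated docstring, thin as it is, at least isn't wrong.)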


def sum_of_square_error(data_x, data_y, len_data, theta):
    '''
    It calculates the sum of squared error for the given data and the given theta.

    :param data_x: the data matrix
    :param data_y: the y values of the data
    :param len_data: the number of data points
    :param theta: theta vector
    :return: The sum of the squares of the errors.
    '''
    prod = np.dot(theta, data_x.transpose())
    prod -= data_y.transpose()
    sum_elem = np.sum(np.square(prod))
    error = sum_elem / (2 * len_data)
    return error
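
(Same deal here, my note: what this returns is the mean-squared-error cost

$$J(\theta) = \frac{1}{2n} \sum_{i=1}^{n} (\theta x_i^{\top} - y_i)^2$$

whose gradient is exactly the $(\theta X^{\top} - y^{\top})X / n$ term in the update above, so the two generated docstrings are at least consistent with each other.)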


def run_linear_regression(data_x, data_y):
    '''
    Runs gradient descent on the data and returns the final theta vector.

    :param data_x: The training data
    :param data_y: The dependent variable
    :return: Theta
    '''
    iterations = 100000
    alpha = 0.0001550

    no_features = data_x.shape[1]
    len_data = data_x.shape[0] - 1

    theta = np.zeros((1, no_features))

    for i in range(0, iterations):
        theta = run_steep_gradient_descent(
            data_x, data_y, len_data, alpha, theta)
        error = sum_of_square_error(data_x, data_y, len_data, theta)
        print("At Iteration %d - Error is %.5f " % (i + 1, error))

    return theta

What do you think?