
This post breaks down, part by part, a from-scratch implementation of LINE hard-coded on top of numpy.

Negative sampling
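
`ns_optimizing` computes, for one target node, the gradient and loss of the negative-sampling objective over the true context node plus `ns_num` sampled noise nodes.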

import numpy as np

class LINE:
    ...
    def ns_optimizing(self, target, context, dot_prod_mat, t_j):
        # Dot products between the target vector and each candidate vector
        # (the true context first, then the negative samples)
        dot_prod = dot_prod_mat[target, context].reshape((-1, 1))
        sigmoid = self.sigmoid(dot_prod)

        # Gradient of the negative-sampling loss w.r.t. each dot product
        derivative = sigmoid - t_j
        # Loss: -log sigmoid(x) for the positive pair, -log sigmoid(-x) for negatives
        obj = -np.sum(np.log(self.sigmoid(np.where(t_j == 1, 1, -1) * dot_prod)))
        return derivative, obj
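
For reference, `derivative` and `obj` above are the gradient and value of the negative-sampling objective from the LINE paper. Writing $x_j = \vec{u}_i^{\top}\vec{u}_j'$ for the dot product between target $i$ and the $j$-th candidate (the true context first, then the $K$ negatives), with label $t_j \in \{0, 1\}$:

$$O = -\sum_{j=0}^{K}\Big[t_j \log \sigma(x_j) + (1-t_j)\log \sigma(-x_j)\Big], \qquad \frac{\partial O}{\partial x_j} = \sigma(x_j) - t_j,$$

which is exactly `sigmoid - t_j` in the code; chaining through $x_j$ gives the row updates applied in `propagating` below.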


Process
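
`propagating` runs one training epoch: it visits every edge of the graph in random order and, for each edge, draws negative samples and applies the first- and second-order SGD updates using the gradients from `ns_optimizing`.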

class LINE:
    ...
    def propagating(self):
        # First-order proximity: (34, 34) / (context, target)
        prior_fst_emb = self.fst_emb.copy()
        fst_dot_prod_mat = self.fst_emb @ self.fst_emb.T

        # Second-order proximity: (34, 34) / (context, target)
        prior_snd_target = self.snd_target.copy()
        prior_snd_context = self.snd_context.copy()
        snd_dot_prod_mat = self.snd_target @ self.snd_context.T

        # Loss Value
        fst_obj = []
        snd_obj = []

        case_num = len(self.adj_mat_idx)
        # For an unweighted graph the edge weight in the update rule is always 1, so it is omitted.
        for idx in np.random.choice(case_num, case_num, replace=False):
            target, context = self.adj_mat_idx[idx]

            # Negative sampling: draw ns_num noise nodes for this target
            ns = np.random.choice(np.arange(self.nodes_num), self.ns_num, replace=False, p=self.noise_dist[:, target])
            self.negative_count[target, ns] += 1

            # Candidates: the true context first, then the negatives;
            # t_j holds the matching labels (1 for the context, 0 for each negative)
            indicator = np.append(context, ns)
            t_j = np.zeros((self.ns_num + 1, 1))
            t_j[0] = 1

            # First-order update: both ends of an edge share one embedding matrix
            fst_derivative, fst_obj_indi = self.ns_optimizing(target, indicator, fst_dot_prod_mat, t_j)
            fst_obj.append(fst_obj_indi)
            fst_prior = prior_fst_emb[indicator]
            self.fst_emb[target] -= self.fst_lr * np.sum(fst_derivative * fst_prior, axis=0)
            self.fst_emb[indicator] -= self.fst_lr * fst_derivative * prior_fst_emb[target]

            # Second-order update: separate "target" and "context" embedding matrices
            snd_derivative, snd_obj_indi = self.ns_optimizing(target, indicator, snd_dot_prod_mat, t_j)
            snd_obj.append(snd_obj_indi)
            snd_context_prior = prior_snd_context[indicator]
            self.snd_target[target] -= self.snd_lr * np.sum(snd_derivative * snd_context_prior, axis=0)
            self.snd_context[indicator] -= self.snd_lr * snd_derivative * prior_snd_target[target]

        return np.average(fst_obj), np.average(snd_obj)
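
Neither the constructor nor `self.noise_dist` appears in this excerpt, so the following is only a minimal usage sketch: the constructor signature and the `build_noise_dist` helper are hypothetical, and the noise distribution follows the $P_n(v) \propto d_v^{3/4}$ convention that the LINE paper borrows from word2vec.

import numpy as np

# Hypothetical helper: column t of the result is the distribution used as
# p=self.noise_dist[:, target] above -- degree^(3/4), with the target masked out
def build_noise_dist(adj_mat):
    weight = adj_mat.sum(axis=1).astype(float) ** 0.75
    noise_dist = np.tile(weight[:, None], (1, adj_mat.shape[0]))
    np.fill_diagonal(noise_dist, 0.0)            # a node is never its own negative
    return noise_dist / noise_dist.sum(axis=0)   # each column sums to 1

# Hypothetical training loop on the 34-node karate-club graph that the
# (34, 34) shape comments refer to; all argument names are assumptions
line = LINE(adj_mat, emb_dim=2, ns_num=5, fst_lr=0.025, snd_lr=0.025)
for epoch in range(200):
    fst_loss, snd_loss = line.propagating()      # one pass over every edge
    if epoch % 20 == 0:
        print(f"epoch {epoch:3d}  1st-order {fst_loss:.4f}  2nd-order {snd_loss:.4f}")

# As in the paper, the two proximities can be combined by concatenation per node
final_emb = np.hstack([line.fst_emb, line.snd_target])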
