From 0504eaf3e0dbeea659f6321ee593a7589c2db0f1 Mon Sep 17 00:00:00 2001
From: Haowen Wang <50039524+whw199833@users.noreply.github.com>
Date: Wed, 6 Dec 2023 23:19:36 +0800
Subject: [PATCH] update

---
 example/loss/CoxRegressionLoss_test.ipynb        | 41 +++++++++++
 example/loss/HuberLoss_test copy.ipynb           | 43 +++++++++++
 example/loss/HuberLoss_test.ipynb                | 43 +++++++++++
 example/loss/LogLoss_test.ipynb                  | 43 +++++++++++
 .../__pycache__/__init__.cpython-310.pyc         | Bin 180 -> 0 bytes
 gbiz_torch/layer/interaction.py                  |  2 +-
 gbiz_torch/loss/__init__.py                      |  2 +
 gbiz_torch/loss/common_improved_loss.py          | 69 ++++++++++++++++++
 gbiz_torch/loss/cox_regression_loss.py           | 25 +++++++
 gbiz_torch/model/wnd.pyc                         | Bin 2318 -> 0 bytes
 gbiz_torch/model/xdeepfm.py                      | 67 +++++++++++++++++
 11 files changed, 334 insertions(+), 1 deletion(-)
 create mode 100644 example/loss/CoxRegressionLoss_test.ipynb
 create mode 100644 example/loss/HuberLoss_test copy.ipynb
 create mode 100644 example/loss/HuberLoss_test.ipynb
 create mode 100644 example/loss/LogLoss_test.ipynb
 delete mode 100644 gbiz_torch/__pycache__/__init__.cpython-310.pyc
 create mode 100644 gbiz_torch/loss/__init__.py
 create mode 100644 gbiz_torch/loss/common_improved_loss.py
 create mode 100644 gbiz_torch/loss/cox_regression_loss.py
 delete mode 100644 gbiz_torch/model/wnd.pyc
 create mode 100644 gbiz_torch/model/xdeepfm.py

diff --git a/example/loss/CoxRegressionLoss_test.ipynb b/example/loss/CoxRegressionLoss_test.ipynb
new file mode 100644
index 0000000..1281684
--- /dev/null
+++ b/example/loss/CoxRegressionLoss_test.ipynb
@@ -0,0 +1,41 @@
+{
+ "cells": [
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import torch\n",
+    "from gbiz_torch.loss import CoxRegressionLoss"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "labels = torch.randint(0, 2, (8, 1))\n",
+    "predict = torch.randint(0, 100, (8, 1))/100\n",
+    "\n",
+    "cox_loss = CoxRegressionLoss()\n",
+    "cox_loss(labels, predict)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  }
+ ],
+ "metadata": {
+  "language_info": {
+   "name": "python"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
diff --git a/example/loss/HuberLoss_test copy.ipynb b/example/loss/HuberLoss_test copy.ipynb
new file mode 100644
index 0000000..2bdc5d6
--- /dev/null
+++ b/example/loss/HuberLoss_test copy.ipynb
@@ -0,0 +1,43 @@
+{
+ "cells": [
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import torch\n",
+    "from gbiz_torch.loss import MCCrossEntropy"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "##test MCCrossEntropy\n",
+    "\n",
+    "labels_d = torch.cat([torch.eye(5), torch.eye(5)], dim=0)\n",
+    "predict = torch.randn((10, 5), requires_grad=True)\n",
+    "\n",
+    "MCCross_Entropy = MCCrossEntropy()\n",
+    "MCCross_Entropy(labels_d, predict)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  }
+ ],
+ "metadata": {
+  "language_info": {
+   "name": "python"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
diff --git a/example/loss/HuberLoss_test.ipynb b/example/loss/HuberLoss_test.ipynb
new file mode 100644
index 0000000..293b111
--- /dev/null
+++ b/example/loss/HuberLoss_test.ipynb
@@ -0,0 +1,43 @@
+{
+ "cells": [
+  {
+   "cell_type": "code",
+   "execution_count": null,
"metadata": {}, + "outputs": [], + "source": [ + "import torch\n", + "import gbiz_torch.loss import HuberLoss" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "##test HuberLoss\n", + "\n", + "labels = torch.randn(8, 5)\n", + "predict = torch.randn(8, 5)\n", + "\n", + "Huber_Loss = HuberLoss()\n", + "Huber_Loss(labels, predict)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "language_info": { + "name": "python" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/example/loss/LogLoss_test.ipynb b/example/loss/LogLoss_test.ipynb new file mode 100644 index 0000000..2a01e6f --- /dev/null +++ b/example/loss/LogLoss_test.ipynb @@ -0,0 +1,43 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import torch\n", + "import gbiz_torch.loss import LogLoss" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "##test LogLoss\n", + "\n", + "labels = torch.randint(0, 5, (8, 1))\n", + "predict = torch.randn((8, 5), requires_grad=True)\n", + "\n", + "Log_loss = LogLoss()\n", + "Log_loss(labels, predict)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "language_info": { + "name": "python" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/gbiz_torch/__pycache__/__init__.cpython-310.pyc b/gbiz_torch/__pycache__/__init__.cpython-310.pyc deleted file mode 100644 index b95a6eb220f71cc53ec99c95215e098dbb05a447..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 180 zcmd1j<>g`kfy?A%PfhH*DI*J#bJ}1pHiBWY6r5gml5Z|*M+X<;s5+RT(p?yHem5kj+q7?=KZKEncYDJeyRr9i1`_}fR=RIq8 zFKHt8DfuA0^2$fyn;^s+68vV*b{tZoaOWNG&hGr#ncvKn|G2aE!>@_zv)NO||MwW~ z5G27bVku)-%;ph0DzU@Td|9UDQH33@1Wt*iRW`1%!y1c{GE>;Avq8j?3cFzJoUzx3 ztEBRvu>$q_Z#>;bgsHEC>~y<(@>JQdcx93oS)$T~XEmI7is8NlQSb?BLE#D}pfFb^ zL#o1(6?Rb#KGj%KWfu`S6dyFmhSyDzGKsSM;M93Fc5~4^ZQgYF$oq-wwA<3Y)yJ*O4%@Ql+7BN+c+`6E z`0-cU^V3gqKgwWfSDE1#FV7TzHo~k4YsboX{!`X(@*kB|V=YH~SK6^trpcA>x8@pl za5Bw;ySp+wQHH;gW;ox~W|Di?nO%$Syq$2bjKd`wJ?Z-+;q;km@7T76*dLVR@k&kX`LduH8!oY^QZ=Sfdh3Qi!8XGFOyuP`iUK0i9^91 z=_FA`~37YBGsuw05_Auan}3`~bRZ-I;^$66mtuQS63xxpec z*JRRdVb!q12Wci@4D&pdR-YAO$1=^;HJQX_6AHAIUM(aCmdwFaHsF4wg4>S!jL%ND zuBpkL5K|_B7A-wnOe?(UjySZh`x$t5wCD`ISgU1xKMx``QSH5oUS4U+Q?KuRW~#m zPu<4lsFHZmY=j`+gov@lJb@-LMubEM2+h={gV2q%5pIO85SQsRIR@oOeoUHUTF2C% zV(Kt4DQjR9#C?p}u&2+2&uo9x9#dCqpO~aIIgLNS-lrID6NGJTMXOPD>Ftv_GC^e( zN`?r)egql;`0O1jBQ(D^b!I=KT6tGt<{tB?CTxY>;k-wlM-g^WNe2y5=>7z`0+aVw z$*haq+&j=is80@*^tn~;idxFIdh4x3%+XjnH)-0w-{dFMco&+ksEQTToA;FRe>Ap# zH>er(D6Z&Rvo8<@BPKM4q<1r;0%AhG(2UT!qz^!f@6aJicM%g$`CGbKKrrE`=%4|S z_*C??QOS&VJ79Q16FxO3(k5SH>Zml>+D2(TS}om+y5Un6kSTQXS7k>zT{0YdVQ(<-^WT}G48%J%tx>;Q*NAq|6qXzB71m@se zh+xYYpph~BZP5l*0eG!>kCbWfeQ8N1o_o@NSvRdvnnkrz0`S4cMMFLR5ATCl-xNXm RHq<~Jy4ItOXfwK7{s;aTBkTYG diff --git a/gbiz_torch/model/xdeepfm.py b/gbiz_torch/model/xdeepfm.py new file mode 100644 index 0000000..419cff8 --- /dev/null +++ b/gbiz_torch/model/xdeepfm.py @@ -0,0 +1,67 @@ +# coding: utf-8 +# @Author: Haowen Wang + +import torch +import torch.nn as nn +from gbiz_torch.layer import DNNLayer, CINLayer + + +class xDeepFMModel(nn.Module): + """ + Model: xDeepFM Model + + Paper: xDeepFM: Combining Explicit and Implicit Feature Interactions for Recommender Systems + + 
+    Link: https://arxiv.org/abs/1803.05170
+
+    Author: Jianxun Lian, Xiaohuan Zhou, Fuzheng Zhang, Zhongxia Chen, Xing Xie, Guangzhong Sun
+
+    Developer: Haowen Wang
+
+    inputs: 3d tensor (batch_size, fields, n_dim)
+
+    outputs: 2d tensor (batch_size, out_dim)
+
+    """
+
+    def __init__(self, hidden_units, act_fn='relu', l2_reg=0.001, dropout_rate=0, use_bn=False,
+                 seed=1024, cin_hidden_units=[100, 100], cin_act_fn='relu', cin_l2_reg=0.001, name='xDeepFMModel'):
+        """
+        Args:
+            hidden_units: list, units in each hidden layer of the DNN tower
+            act_fn: string, activation function
+            l2_reg: float, regularization value
+            dropout_rate: float, fraction of the units to drop out
+            use_bn: boolean, if True, apply BatchNormalization in each hidden layer
+            seed: int, random value for initialization
+            cin_hidden_units: list, units in each CIN layer
+            cin_act_fn: string, activation function in each CIN layer
+            cin_l2_reg: float, regularization value in each CIN layer
+
+        """
+        super(xDeepFMModel, self).__init__()
+        self.cin_layer = CINLayer(hidden_units=cin_hidden_units, act_fn=cin_act_fn,
+                                  l2_reg=cin_l2_reg, name="{}_cin_layer".format(name))
+        self.dnn_layer = DNNLayer(hidden_units=hidden_units, activation=act_fn, l2_reg=l2_reg,
+                                  dropout_rate=dropout_rate, use_bn=use_bn, seed=seed, name="{}_dnn_layer".format(name))
+
+    def forward(self, inputs, training=None):
+        """
+        Args:
+            inputs: 3d tensor (batch_size, fields, n_dim)
+
+        Returns:
+            2d tensor (batch_size, out_dim)
+
+        """
+        # explicit feature interactions from the CIN tower
+        cin_output = self.cin_layer(inputs)
+
+        # implicit interactions: flatten the field embeddings for the DNN tower
+        flat_inputs = torch.flatten(inputs, start_dim=1)
+        dnn_output = self.dnn_layer(flat_inputs, training=training)
+
+        # concatenate the explicit and implicit interaction outputs
+        combined_output = torch.cat(
+            [cin_output, dnn_output], dim=-1)
+        return combined_output
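
For quick reference, a minimal usage sketch of the new xDeepFMModel (not part of the patch). It assumes the constructor signatures used above and that CINLayer and DNNLayer infer their input sizes from the 3d input; the batch size, field count, embedding dimension, and hidden units below are illustrative only.

import torch
from gbiz_torch.model.xdeepfm import xDeepFMModel

# hypothetical shapes: batch of 8 samples, 10 feature fields, 16-dim embeddings
inputs = torch.randn(8, 10, 16)

model = xDeepFMModel(hidden_units=[64, 32], cin_hidden_units=[100, 100])
output = model(inputs)   # 2d tensor (batch_size, out_dim)
print(output.shape)      # out_dim depends on the CIN and DNN layer configurations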