diff --git a/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-with-data-dependency-steps.ipynb b/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-with-data-dependency-steps.ipynb
index 419303a42..587875733 100644
--- a/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-with-data-dependency-steps.ipynb
+++ b/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-with-data-dependency-steps.ipynb
@@ -230,7 +230,9 @@
         "- **output_name:** Name of the output\n",
         "- **output_mode:** Specifies \"upload\" or \"mount\" modes for producing output (default: mount)\n",
         "- **output_path_on_compute:** For \"upload\" mode, the path to which the module writes this output during execution\n",
-        "- **output_overwrite:** Flag to overwrite pre-existing data"
+        "- **output_overwrite:** Flag to overwrite pre-existing data\n",
+        "\n",
+        "Because PipelineData uses [DataReference](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.data.data_reference.datareference?view=azure-ml-py) to represent the data, which is not the recommended approach, we recommend that you use [pipeline_output_dataset](https://docs.microsoft.com/en-us/python/api/azureml-pipeline-core/azureml.pipeline.core.pipeline_output_dataset?view=azure-ml-py) instead. Once promoted to an Azure Machine Learning dataset, it will also be consumed as a Dataset instead of a DataReference in subsequent steps."
       ]
     },
     {
@@ -252,8 +254,12 @@
         "#              is_directory=None)\n",
         "\n",
         "# Naming the intermediate data as processed_data1 and assigning it to the variable processed_data1.\n",
-        "processed_data1 = PipelineData(\"processed_data1\",datastore=def_blob_store)\n",
-        "print(\"PipelineData object created\")"
+        "# Promote PipelineData to PipelineOutputFileDataset, which uses a Dataset instead of a DataReference\n",
+        "\n",
+        "from azureml.pipeline.core.pipeline_output_dataset import PipelineOutputFileDataset\n",
+        "\n",
+        "processed_data1 = PipelineOutputFileDataset(PipelineData(\"processed_data1\",datastore=def_blob_store))\n",
+        "print(\"PipelineOutputFileDataset object created\")"
       ]
     },
     {
@@ -544,10 +550,13 @@
       "Azure ML"
     ],
     "friendly_name": "Azure Machine Learning Pipelines with Data Dependency",
+    "interpreter": {
+      "hash": "3e9e0e270b75c5e6da2e22113ba4f77b864d68f95da6601809c29e46c73ae6bb"
+    },
     "kernelspec": {
       "display_name": "Python 3.6",
       "language": "python",
-      "name": "python36"
+      "name": "python3"
     },
     "language_info": {
       "codemirror_mode": {
@@ -559,7 +568,7 @@
       "name": "python",
       "nbconvert_exporter": "python",
       "pygments_lexer": "ipython3",
-      "version": "3.6.7"
+      "version": "3.7.8"
     },
     "order_index": 2,
     "star_tag": [
@@ -572,4 +581,4 @@
   },
   "nbformat": 4,
   "nbformat_minor": 2
-}
\ No newline at end of file
+}