diff --git a/core/src/main/scala/org/apache/spark/api/python/PythonHadoopUtil.scala b/core/src/main/scala/org/apache/spark/api/python/PythonHadoopUtil.scala
index 6259bead3ea88..84e206b0bcf6d 100644
--- a/core/src/main/scala/org/apache/spark/api/python/PythonHadoopUtil.scala
+++ b/core/src/main/scala/org/apache/spark/api/python/PythonHadoopUtil.scala
@@ -106,7 +106,7 @@ private[python] class WritableToJavaConverter(
 }
 
 /**
- * A converter that converts common types to [[org.apache.hadoop.io.Writable]]. Note that array
+ * A converter that converts common types to [[org.apache.hadoop.io.Writable]]. @note Array
  * types are not supported since the user needs to subclass [[org.apache.hadoop.io.ArrayWritable]]
  * to set the type properly. See [[org.apache.spark.api.python.DoubleArrayWritable]] and
  * [[org.apache.spark.api.python.DoubleArrayToWritableConverter]] for an example. They are used in
@@ -115,7 +115,7 @@ private[python] class WritableToJavaConverter(
 private[python] class JavaToWritableConverter extends Converter[Any, Writable] {
 
   /**
-   * Converts common data types to [[org.apache.hadoop.io.Writable]]. Note that array types are not
+   * Converts common data types to [[org.apache.hadoop.io.Writable]]. @note Array types are not
    * supported out-of-the-box.
    */
   private def convertToWritable(obj: Any): Writable = {
diff --git a/core/src/main/scala/org/apache/spark/api/python/PythonRDD.scala b/core/src/main/scala/org/apache/spark/api/python/PythonRDD.scala
index 0ca91b9bf86c6..988dd48adb434 100644
--- a/core/src/main/scala/org/apache/spark/api/python/PythonRDD.scala
+++ b/core/src/main/scala/org/apache/spark/api/python/PythonRDD.scala
@@ -760,7 +760,7 @@ private[spark] object PythonRDD extends Logging {
 
   /**
    * Output a Python RDD of key-value pairs as a Hadoop SequenceFile using the Writable types
-   * we convert from the RDD's key and value types. Note that keys and values can't be
+   * we convert from the RDD's key and value types. @note Keys and values can't be
    * [[org.apache.hadoop.io.Writable]] types already, since Writables are not Java
    * `Serializable` and we can't peek at them. The `path` can be on any Hadoop file system.
    */