
Error in prediction with multi-feature LSTM autoencoder #65

Open
raouflamari opened this issue Apr 23, 2018 · 0 comments

raouflamari commented Apr 23, 2018

I am implementing an LSTM autoencoder to reconstruct multi-feature sequences.

The columns of my dataframe are: 0_F1, 0_F2, 0_F3, 0_F4, 0_F5, 1_F1, 1_F2, 1_F3, 1_F4, 1_F5, 2_F1, 2_F2, 2_F3, 2_F4, 2_F5

Each row is a sequence of 5 features over 3 timesteps.
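
To make the intended layout concrete, here is how I picture one flattened row being turned into a (timesteps, features) matrix (plain numpy, made-up values, just for illustration):

import numpy as np

# One flattened row, ordered [0_F1..0_F5, 1_F1..1_F5, 2_F1..2_F5]
row = np.arange(15, dtype=float)  # made-up values

# Intended shape: 3 timesteps x 5 features; matrix[t, f] holds column "<t>_F<f+1>"
matrix = row.reshape(3, 5)
print(matrix.shape)  # (3, 5)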

Here is my code:

# Imports (pyspark.ml, keras, and dist-keras module paths as I use them)
from pyspark.ml.feature import VectorAssembler, MinMaxScaler
from keras.layers import Input, LSTM, RepeatVector
from keras.models import Model
from distkeras.transformers import ReshapeTransformer
from distkeras.trainers import SingleTrainer
from distkeras.predictors import ModelPredictor

# Feature preparing
timesteps = 3
features = ['0_F1', '0_F2', '0_F3', '0_F4', '0_F5', '1_F1', '1_F2', '1_F3', '1_F4', '1_F5', '2_F1', '2_F2', '2_F3', '2_F4', '2_F5']
assembler = VectorAssembler(inputCols=features, outputCol="features")
dataset = assembler.transform(df)
scaler = MinMaxScaler(inputCol="features", outputCol="features_scaled")
scaler_model = scaler.fit(dataset)
dataset = scaler_model.transform(dataset)

# Reshaping
dimension = int(len(features)/timesteps)
reshape_transformer = ReshapeTransformer("features_scaled", "matrix", (timesteps, dimension))
dataset = reshape_transformer.transform(dataset)

# Modeling
inputs = Input(shape=(timesteps, dimension))
encoded = LSTM(50)(inputs)
decoded = RepeatVector(timesteps)(encoded)
decoded = LSTM(dimension, return_sequences=True)(decoded)              
sequence_autoencoder = Model(inputs, decoded)

# dataset_healthy is a sample of dataset
trainer = SingleTrainer(keras_model=sequence_autoencoder, worker_optimizer="adam",
                        loss="mse", metrics=["mse"], batch_size=1,
                        features_col="matrix", label_col="matrix", num_epoch=1)
trained_model = trainer.train(dataset_healthy)

# dataset_test is a sample of dataset
predictor = ModelPredictor(keras_model=sequence_autoencoder, features_col="matrix")
p = predictor.predict(dataset_test)
p.show(5)

I get this error when trying to predict:

---------------------------------------------------------------------------
Py4JJavaError                             Traceback (most recent call last)
<ipython-input-70-2fb1fb90ca99> in <module>()
----> 1 p.select('matrix').head()

D:\workspace\hadoop\spark-2.2.0-bin-hadoop2.6\python\lib\pyspark.zip\pyspark\sql\dataframe.py in head(self, n)
    968         """
    969         if n is None:
--> 970             rs = self.head(1)
    971             return rs[0] if rs else None
    972         return self.take(n)

D:\workspace\hadoop\spark-2.2.0-bin-hadoop2.6\python\lib\pyspark.zip\pyspark\sql\dataframe.py in head(self, n)
    970             rs = self.head(1)
    971             return rs[0] if rs else None
--> 972         return self.take(n)
    973 
    974     @ignore_unicode_prefix

D:\workspace\hadoop\spark-2.2.0-bin-hadoop2.6\python\lib\pyspark.zip\pyspark\sql\dataframe.py in take(self, num)
    474         [Row(age=2, name=u'Alice'), Row(age=5, name=u'Bob')]
    475         """
--> 476         return self.limit(num).collect()
    477 
    478     @since(1.3)

D:\workspace\hadoop\spark-2.2.0-bin-hadoop2.6\python\lib\pyspark.zip\pyspark\sql\dataframe.py in collect(self)
    436         """
    437         with SCCallSiteSync(self._sc) as css:
--> 438             port = self._jdf.collectToPython()
    439         return list(_load_from_socket(port, BatchedSerializer(PickleSerializer())))
    440 

D:\workspace\hadoop\spark-2.2.0-bin-hadoop2.6\python\lib\py4j-0.10.4-src.zip\py4j\java_gateway.py in __call__(self, *args)
   1131         answer = self.gateway_client.send_command(command)
   1132         return_value = get_return_value(
-> 1133             answer, self.gateway_client, self.target_id, self.name)
   1134 
   1135         for temp_arg in temp_args:

D:\workspace\hadoop\spark-2.2.0-bin-hadoop2.6\python\lib\pyspark.zip\pyspark\sql\utils.py in deco(*a, **kw)
     61     def deco(*a, **kw):
     62         try:
---> 63             return f(*a, **kw)
     64         except py4j.protocol.Py4JJavaError as e:
     65             s = e.java_exception.toString()

D:\workspace\hadoop\spark-2.2.0-bin-hadoop2.6\python\lib\py4j-0.10.4-src.zip\py4j\protocol.py in get_return_value(answer, gateway_client, target_id, name)
    317                 raise Py4JJavaError(
    318                     "An error occurred while calling {0}{1}{2}.\n".
--> 319                     format(target_id, ".", name), value)
    320             else:
    321                 raise Py4JError(

Py4JJavaError: An error occurred while calling o5231.collectToPython.
: org.apache.spark.SparkException: Job aborted due to stage failure: Task 0 in stage 75.0 failed 1 times, most recent failure: Lost task 0.0 in stage 75.0 (TID 938, localhost, executor driver): org.apache.spark.api.python.PythonException: Traceback (most recent call last):
  File "D:\workspace\hadoop\spark-2.2.0-bin-hadoop2.6\python\lib\pyspark.zip\pyspark\worker.py", line 177, in main
  File "D:\workspace\hadoop\spark-2.2.0-bin-hadoop2.6\python\lib\pyspark.zip\pyspark\worker.py", line 172, in process
  File "D:\workspace\hadoop\spark-2.2.0-bin-hadoop2.6\python\lib\pyspark.zip\pyspark\serializers.py", line 268, in dump_stream
    vs = list(itertools.islice(iterator, batch))
  File "D:\workspace\hadoop\spark-2.2.0-bin-hadoop2.6\python\lib\pyspark.zip\pyspark\sql\types.py", line 576, in toInternal
    return tuple(f.toInternal(v) for f, v in zip(self.fields, obj))
  File "D:\workspace\hadoop\spark-2.2.0-bin-hadoop2.6\python\lib\pyspark.zip\pyspark\sql\types.py", line 576, in <genexpr>
    return tuple(f.toInternal(v) for f, v in zip(self.fields, obj))
  File "D:\workspace\hadoop\spark-2.2.0-bin-hadoop2.6\python\lib\pyspark.zip\pyspark\sql\types.py", line 436, in toInternal
    return self.dataType.toInternal(obj)
  File "D:\workspace\hadoop\spark-2.2.0-bin-hadoop2.6\python\lib\pyspark.zip\pyspark\sql\types.py", line 654, in toInternal
    return self._cachedSqlType().toInternal(self.serialize(obj))
  File "D:\workspace\hadoop\spark-2.2.0-bin-hadoop2.6\python\lib\pyspark.zip\pyspark\mllib\linalg\__init__.py", line 166, in serialize
    values = [float(v) for v in obj]
  File "D:\workspace\hadoop\spark-2.2.0-bin-hadoop2.6\python\lib\pyspark.zip\pyspark\mllib\linalg\__init__.py", line 166, in <listcomp>
    values = [float(v) for v in obj]
TypeError: only size-1 arrays can be converted to Python scalars

	at org.apache.spark.api.python.PythonRunner$$anon$1.read(PythonRDD.scala:193)
	at org.apache.spark.api.python.PythonRunner$$anon$1.<init>(PythonRDD.scala:234)
	at org.apache.spark.api.python.PythonRunner.compute(PythonRDD.scala:152)
	at org.apache.spark.api.python.PythonRDD.compute(PythonRDD.scala:63)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:323)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:287)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:323)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:287)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:323)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:287)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:323)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:287)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:323)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:287)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:323)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:287)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:323)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:287)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:87)
	at org.apache.spark.scheduler.Task.run(Task.scala:108)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:335)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(Unknown Source)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(Unknown Source)
	at java.lang.Thread.run(Unknown Source)

Driver stacktrace:
	at org.apache.spark.scheduler.DAGScheduler.org$apache$spark$scheduler$DAGScheduler$$failJobAndIndependentStages(DAGScheduler.scala:1499)
	at org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:1487)
	at org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:1486)
	at scala.collection.mutable.ResizableArray$class.foreach(ResizableArray.scala:59)
	at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:48)
	at org.apache.spark.scheduler.DAGScheduler.abortStage(DAGScheduler.scala:1486)
	at org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:814)
	at org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:814)
	at scala.Option.foreach(Option.scala:257)
	at org.apache.spark.scheduler.DAGScheduler.handleTaskSetFailed(DAGScheduler.scala:814)
	at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.doOnReceive(DAGScheduler.scala:1714)
	at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:1669)
	at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:1658)
	at org.apache.spark.util.EventLoop$$anon$1.run(EventLoop.scala:48)
	at org.apache.spark.scheduler.DAGScheduler.runJob(DAGScheduler.scala:630)
	at org.apache.spark.SparkContext.runJob(SparkContext.scala:2022)
	at org.apache.spark.SparkContext.runJob(SparkContext.scala:2043)
	at org.apache.spark.SparkContext.runJob(SparkContext.scala:2062)
	at org.apache.spark.sql.execution.SparkPlan.executeTake(SparkPlan.scala:336)
	at org.apache.spark.sql.execution.CollectLimitExec.executeCollect(limit.scala:38)
	at org.apache.spark.sql.Dataset$$anonfun$collectToPython$1.apply$mcI$sp(Dataset.scala:2803)
	at org.apache.spark.sql.Dataset$$anonfun$collectToPython$1.apply(Dataset.scala:2800)
	at org.apache.spark.sql.Dataset$$anonfun$collectToPython$1.apply(Dataset.scala:2800)
	at org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:65)
	at org.apache.spark.sql.Dataset.withNewExecutionId(Dataset.scala:2823)
	at org.apache.spark.sql.Dataset.collectToPython(Dataset.scala:2800)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(Unknown Source)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(Unknown Source)
	at java.lang.reflect.Method.invoke(Unknown Source)
	at py4j.reflection.MethodInvoker.invoke(MethodInvoker.java:244)
	at py4j.reflection.ReflectionEngine.invoke(ReflectionEngine.java:357)
	at py4j.Gateway.invoke(Gateway.java:280)
	at py4j.commands.AbstractCommand.invokeMethod(AbstractCommand.java:132)
	at py4j.commands.CallCommand.execute(CallCommand.java:79)
	at py4j.GatewayConnection.run(GatewayConnection.java:214)
	at java.lang.Thread.run(Unknown Source)
Caused by: org.apache.spark.api.python.PythonException: Traceback (most recent call last):
  File "D:\workspace\hadoop\spark-2.2.0-bin-hadoop2.6\python\lib\pyspark.zip\pyspark\worker.py", line 177, in main
  File "D:\workspace\hadoop\spark-2.2.0-bin-hadoop2.6\python\lib\pyspark.zip\pyspark\worker.py", line 172, in process
  File "D:\workspace\hadoop\spark-2.2.0-bin-hadoop2.6\python\lib\pyspark.zip\pyspark\serializers.py", line 268, in dump_stream
    vs = list(itertools.islice(iterator, batch))
  File "D:\workspace\hadoop\spark-2.2.0-bin-hadoop2.6\python\lib\pyspark.zip\pyspark\sql\types.py", line 576, in toInternal
    return tuple(f.toInternal(v) for f, v in zip(self.fields, obj))
  File "D:\workspace\hadoop\spark-2.2.0-bin-hadoop2.6\python\lib\pyspark.zip\pyspark\sql\types.py", line 576, in <genexpr>
    return tuple(f.toInternal(v) for f, v in zip(self.fields, obj))
  File "D:\workspace\hadoop\spark-2.2.0-bin-hadoop2.6\python\lib\pyspark.zip\pyspark\sql\types.py", line 436, in toInternal
    return self.dataType.toInternal(obj)
  File "D:\workspace\hadoop\spark-2.2.0-bin-hadoop2.6\python\lib\pyspark.zip\pyspark\sql\types.py", line 654, in toInternal
    return self._cachedSqlType().toInternal(self.serialize(obj))
  File "D:\workspace\hadoop\spark-2.2.0-bin-hadoop2.6\python\lib\pyspark.zip\pyspark\mllib\linalg\__init__.py", line 166, in serialize
    values = [float(v) for v in obj]
  File "D:\workspace\hadoop\spark-2.2.0-bin-hadoop2.6\python\lib\pyspark.zip\pyspark\mllib\linalg\__init__.py", line 166, in <listcomp>
    values = [float(v) for v in obj]
TypeError: only size-1 arrays can be converted to Python scalars

	at org.apache.spark.api.python.PythonRunner$$anon$1.read(PythonRDD.scala:193)
	at org.apache.spark.api.python.PythonRunner$$anon$1.<init>(PythonRDD.scala:234)
	at org.apache.spark.api.python.PythonRunner.compute(PythonRDD.scala:152)
	at org.apache.spark.api.python.PythonRDD.compute(PythonRDD.scala:63)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:323)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:287)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:323)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:287)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:323)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:287)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:323)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:287)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:323)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:287)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:323)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:287)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:323)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:287)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:87)
	at org.apache.spark.scheduler.Task.run(Task.scala:108)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:335)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(Unknown Source)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(Unknown Source)
	... 1 more
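
If it helps, the last Python frames of the traceback (values = [float(v) for v in obj] in pyspark.mllib.linalg) look like the DenseVector serializer being fed a 2-D prediction. Here is a minimal sketch of just that conversion step, assuming one predicted sample has shape (timesteps, dimension) = (3, 5):

import numpy as np

# Assumed shape of one predicted sample: (timesteps, dimension) = (3, 5)
prediction = np.zeros((3, 5))

# The serializer iterates the object and calls float() on each element;
# iterating a 2-D array yields 1-D rows, and float(row) raises the TypeError:
try:
    values = [float(v) for v in prediction]
except TypeError as e:
    print(e)  # same "only size-1 arrays can be converted to Python scalars" as above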