Error running PythonTranscriber.py - java

When I try to run PythonTranscriber.py, it fails with the following error:
Traceback (most recent call last):
  File "PythonTranscriber.py", line 14, in <module>
    from edu.cmu.sphinx.decoder import Decoder
ImportError: No module named edu
My script PythonTranscriber.py:
import sys
libDir = "/home/karen/sphinx4-1.0beta5-scr/sphinx4-1.0beta5/src/sphinx4/"
classPaths = [
    "sphinx4.jar",
    "jsapi.jar" ]
for classPath in classPaths:
    sys.path.append(libDir + classPath)
true = 1
false = 0
from edu.cmu.sphinx.decoder import Decoder
from edu.cmu.sphinx.decoder import ResultListener
from edu.cmu.sphinx.decoder.pruner import SimplePruner
from edu.cmu.sphinx.decoder.scorer import ThreadedAcousticScorer
from edu.cmu.sphinx.decoder.search import PartitionActiveListFactory
from edu.cmu.sphinx.decoder.search import SimpleBreadthFirstSearchManager
from edu.cmu.sphinx.frontend import DataBlocker
from edu.cmu.sphinx.frontend import FrontEnd
from edu.cmu.sphinx.frontend.endpoint import NonSpeechDataFilter
from edu.cmu.sphinx.frontend.endpoint import SpeechClassifier
from edu.cmu.sphinx.frontend.endpoint import SpeechMarker
from edu.cmu.sphinx.frontend.feature import DeltasFeatureExtractor
from edu.cmu.sphinx.frontend.feature import LiveCMN
from edu.cmu.sphinx.frontend.filter import Preemphasizer
from edu.cmu.sphinx.frontend.frequencywarp import MelFrequencyFilterBank
from edu.cmu.sphinx.frontend.transform import DiscreteCosineTransform
from edu.cmu.sphinx.frontend.transform import DiscreteFourierTransform
from edu.cmu.sphinx.frontend.util import AudioFileDataSource
from edu.cmu.sphinx.frontend.window import RaisedCosineWindower
from edu.cmu.sphinx.instrumentation import BestPathAccuracyTracker
from edu.cmu.sphinx.instrumentation import MemoryTracker
from edu.cmu.sphinx.instrumentation import SpeedTracker
from edu.cmu.sphinx.jsapi import JSGFGrammar
from edu.cmu.sphinx.linguist.acoustic import UnitManager
from edu.cmu.sphinx.linguist.acoustic.tiedstate import Sphinx3Loader
from edu.cmu.sphinx.linguist.acoustic.tiedstate import TiedStateAcousticModel
from edu.cmu.sphinx.linguist.dictionary import FastDictionary
from edu.cmu.sphinx.linguist.flat import FlatLinguist
from edu.cmu.sphinx.recognizer import Recognizer
from edu.cmu.sphinx.util import LogMath
from java.util.logging import Logger
from java.util.logging import Level
from java.net import URL
from java.util import ArrayList
# if (args.length < 1) {
# throw new Error("USAGE: GroovyTranscriber <sphinx4 root> [<WAV file>]")
# }
root = "../../.."
# init common
Logger.getLogger("").setLevel(Level.WARNING)
logMath = LogMath(1.0001, true)
absoluteBeamWidth = -1
relativeBeamWidth = 1E-80
wordInsertionProbability = 1E-36
languageWeight = 8.0
# init audio data
audioSource = AudioFileDataSource(3200, None)
audioURL = URL("file:" + root + "/src/apps/edu/cmu/sphinx/demo/transcriber/10001-90210-01803.wav")
# (args.length > 1) ?
# File(args[0]).toURI().toURL() : audioSource.setAudioFile(audioURL, None)
# init front end
dataBlocker = DataBlocker(
    10 # blockSizeMs
)
speechClassifier = SpeechClassifier(
    10, # frameLengthMs,
    0.003, # adjustment,
    10, # threshold,
    0 # minSignal
)
speechMarker = SpeechMarker(
    200, # startSpeechTime,
    500, # endSilenceTime,
    100, # speechLeader,
    50, # speechLeaderFrames
    100 # speechTrailer
)
nonSpeechDataFilter = NonSpeechDataFilter()
preemphasizer = Preemphasizer(
    0.97 # preemphasisFactor
)
windower = RaisedCosineWindower(
    0.46, # double alpha
    25.625, # windowSizeInMs
    10.0 # windowShiftInMs
)
fft = DiscreteFourierTransform(
    -1, # numberFftPoints
    false # invert
)
melFilterBank = MelFrequencyFilterBank(
    130.0, # minFreq,
    6800.0, # maxFreq,
    40 # numberFilters
)
dct = DiscreteCosineTransform(
    40, # numberMelFilters,
    13 # cepstrumSize
)
cmn = LiveCMN(
    12.0, # initialMean,
    100, # cmnWindow,
    160 # cmnShiftWindow
)
featureExtraction = DeltasFeatureExtractor(
    3 # window
)
pipeline = [
    audioSource,
    dataBlocker,
    speechClassifier,
    speechMarker,
    nonSpeechDataFilter,
    preemphasizer,
    windower,
    fft,
    melFilterBank,
    dct,
    cmn,
    featureExtraction ]
frontend = FrontEnd(pipeline)
# init models
unitManager = UnitManager()
modelLoader = Sphinx3Loader(
    "file:" + root + "/models/acoustic/tidigits/model.props",
    logMath,
    unitManager,
    true,
    true,
    39,
    "file:" + root + "/models/acoustic/tidigits/wd_dependent_phone.500.mdef",
    "file:" + root + "/models/acoustic/tidigits/wd_dependent_phone.cd_continuous_8gau/",
    0.0,
    1e-7,
    0.0001,
    true)
model = TiedStateAcousticModel(modelLoader, unitManager, true)
dictionary = FastDictionary(
    URL("file:" + root + "/models/acoustic/tidigits/dictionary"),
    URL("file:" + root + "/models/acoustic/tidigits/fillerdict"),
    ArrayList(),
    false,
    "<sil>",
    false,
    false,
    unitManager)
# init linguist
grammar = JSGFGrammar(
    # URL baseURL,
    URL("file:" + root + "/src/apps/edu/cmu/sphinx/demo/transcriber/"),
    logMath, # LogMath logMath,
    "digits", # String grammarName,
    false, # boolean showGrammar,
    false, # boolean optimizeGrammar,
    false, # boolean addSilenceWords,
    false, # boolean addFillerWords,
    dictionary # Dictionary dictionary
)
linguist = FlatLinguist(
    model, # AcousticModel acousticModel,
    logMath, # LogMath logMath,
    grammar, # Grammar grammar,
    unitManager, # UnitManager unitManager,
    wordInsertionProbability, # double wordInsertionProbability,
    1.0, # double silenceInsertionProbability,
    1.0, # double fillerInsertionProbability,
    1.0, # double unitInsertionProbability,
    languageWeight, # float languageWeight,
    false, # boolean dumpGStates,
    false, # boolean showCompilationProgress,
    false, # boolean spreadWordProbabilitiesAcrossPronunciations,
    false, # boolean addOutOfGrammarBranch,
    1.0, # double outOfGrammarBranchProbability,
    1.0, # double phoneInsertionProbability,
    None # AcousticModel phoneLoopAcousticModel
)
# init recognizer
scorer = ThreadedAcousticScorer(frontend, None, 10, true, 0)
pruner = SimplePruner()
activeListFactory = PartitionActiveListFactory(absoluteBeamWidth, relativeBeamWidth, logMath)
searchManager = SimpleBreadthFirstSearchManager(
    logMath, linguist, pruner,
    scorer, activeListFactory,
    false, 0.0, 0, false)
decoder = Decoder(searchManager,
    false, false,
    ArrayList(),
    100000)
recognizer = Recognizer(decoder, None)
recognizer = Recognizer(decoder, None)
# allocate the resources necessary for the recognizer
recognizer.allocate()
# Loop until the last utterance in the audio file has been decoded,
# at which point the recognizer returns None.
result = recognizer.recognize()
while (result != None):
    resultText = result.getBestResultNoFiller()
    print resultText
    result = recognizer.recognize()
I have Jython installed already!
Please help me!

To load Java code, try adding the jars to your classpath.
If you're not familiar with how to do that, either export it in the shell:
export CLASSPATH=$CLASSPATH:/path/to/sphinx4.jar:/path/to/jsapi.jar
or put it on the command line. Note that java ignores -cp when -jar is used, so invoke Jython's main class directly instead:
java -cp /path/to/jars/*:/home/karen/jython-installer-2.5.3/jython.jar org.python.util.jython PythonTranscriber.py
EDIT:
So, I followed the instructions here to download and install Sphinx-4. The path to the jar files was different from the one in your script; they were under the sphinx4/lib directory. You may want to double-check that your path is correct.
I ran your script as below (modified, but no major changes):
import sys, os

lib_dir = '/home/.../jars/sphinx4/lib/'
classpaths = [ 'sphinx4.jar', 'jsapi.jar' ]
for cp in classpaths:
    sys.path.append(os.path.join(lib_dir, cp))

from edu.cmu.sphinx.decoder import Decoder
from ... import ...
# etc.
from java.util import ArrayList
with the command:
jython python_transcriber.py
This almost worked, except that you have one more import error:
from edu.cmu.sphinx.jsapi import JSGFGrammar
should, according to the javadoc, be:
from edu.cmu.sphinx.jsgf import JSGFGrammar
That took care of it for me. Didn't try the rest of the code, but at least all the imports work.
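If the imports still fail after that, a quick sanity check (a minimal sketch, reusing the lib_dir and classpaths variables from the snippet above) is to confirm each jar actually exists on disk before appending it, since a wrong path only surfaces later as the unhelpful ImportError:
import os, sys

for cp in classpaths:
    jar = os.path.join(lib_dir, cp)
    if not os.path.isfile(jar):
        # a bad path shows up later as "ImportError: No module named edu"
        print "Missing jar:", jar
    sys.path.append(jar)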

Related

PySpark SparkConf() equivalent of spark command option "--jars"

I'd like to run some PySpark script on JupyterLab and create a custom UDF from JAR packages. To do so I need to broadcast these JAR packages to the executor nodes. This answer shows the command-line approach (invoking the --jars option of spark-submit), but I'd like to know the SparkConf() approach. On my JupyterLab, sc.version = 3.3.0-SNAPSHOT.
I'm very new to Spark; your help will be highly appreciated!
Code:
import findspark
findspark.init()
findspark.find()
import pyspark
from pyspark import SparkContext, SparkConf
from pyspark.sql import SparkSession
import os
# ------------ create spark session ------------
app_name = 'PySpark_Example'
path = os.getcwd()
conf = SparkConf().setAppName(os.environ.get('JUPYTERHUB_USER').replace(" ", "") + "_" + app_name).setMaster(
    'spark://spark-master-svc.spark:7077')
command = os.popen("hostname -i")
hostname = command.read().split("\n")[0]
command.close()
conf.set("spark.scheduler.mode","FAIR")
conf.set("spark.deployMode","client")
conf.set("spark.driver.host",hostname)
conf.set('spark.extraListeners','sparkmonitor.listener.JupyterSparkMonitorListener')
conf.set("spark.jars", "{path}/my_func.jar,{path}/javabuilder.jar".format(path=path))
conf.set("spark.executor.extraClassPath", "{path}/".format(path=path))
sc = pyspark.SparkContext(conf=conf)
spark = SparkSession(sc)
spark._jsc.addJar("{}/my_func.jar".format(path))
spark._jsc.addJar("{}/javabuilder.jar".format(path))
# ------------- create sample dataframe ---------
sdf = spark.createDataFrame(
    [
        (1, 2.),
        (2, 3.),
        (3, 5.),
    ],
    ["col1", "col2"]
)
sdf.createOrReplaceTempView("temp_table")
# -------------- create UDF ----------------------
create_udf_from_jar = "CREATE OR REPLACE FUNCTION my_func AS 'my_func.Class1' " + \
    "USING JAR '{}/my_func.jar'".format(path)
spark.sql(create_udf_from_jar)
spark.sql("SHOW USER FUNCTIONS").show()
# -------------- test ----------------------------
spark.sql("SELECT my_func(col1) FROM temp_table").show()
Error:
---------------------------------------------------------------------------
Py4JJavaError Traceback (most recent call last)
~tmp/ipykernel_4398/644670379.py in <cell line: 1>()
----> 1 spark.sql("SELECT my_func(col1) FROM temp_table").show()
~opt/spark/python/pyspark/sql/session.py in sql(self, sqlQuery, **kwargs)
1033 sqlQuery = formatter.format(sqlQuery, **kwargs)
1034 try:
-> 1035 return DataFrame(self._jsparkSession.sql(sqlQuery), self._wrapped)
1036 finally:
1037 if len(kwargs) > 0:
~opt/spark/python/lib/py4j-0.10.9.3-src.zip/py4j/java_gateway.py in __call__(self, *args)
1319
1320 answer = self.gateway_client.send_command(command)
-> 1321 return_value = get_return_value(
1322 answer, self.gateway_client, self.target_id, self.name)
1323
~opt/spark/python/pyspark/sql/utils.py in deco(*a, **kw)
188 def deco(*a: Any, **kw: Any) -> Any:
189 try:
--> 190 return f(*a, **kw)
191 except Py4JJavaError as e:
192 converted = convert_exception(e.java_exception)
~opt/spark/python/lib/py4j-0.10.9.3-src.zip/py4j/protocol.py in get_return_value(answer, gateway_client, target_id, name)
324 value = OUTPUT_CONVERTER[type](answer[2:], gateway_client)
325 if answer[1] == REFERENCE_TYPE:
--> 326 raise Py4JJavaError(
327 "An error occurred while calling {0}{1}{2}.\n".
328 format(target_id, ".", name), value)
Py4JJavaError: An error occurred while calling o3813.sql.
: java.lang.NoClassDefFoundError: com/mathworks/toolbox/javabuilder/internal/MWComponentInstance
spark.jars is the one you're looking for (Doc). Note that it has to be set on the SparkConf before the SparkContext is created; setting it afterwards has no effect.
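A minimal sketch of that approach, reusing the jar paths and master URL from the question (the key point is that spark.jars is on the conf before pyspark.SparkContext is constructed):
import os

import pyspark
from pyspark import SparkConf
from pyspark.sql import SparkSession

path = os.getcwd()

conf = (
    SparkConf()
    .setAppName("PySpark_Example")
    .setMaster("spark://spark-master-svc.spark:7077")
    # Comma-separated list; these jars are shipped to the driver and every executor.
    .set("spark.jars", "{p}/my_func.jar,{p}/javabuilder.jar".format(p=path))
)

sc = pyspark.SparkContext(conf=conf)  # jars are distributed when the context starts
spark = SparkSession(sc)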

Trying to retrieve a userCertificate from Windows Active directory using java CertStore but no success

I am trying to retrieve a userCertificate associated with a domain name from Windows Active Directory, but I am having difficulties with the Java API.
For example, when I use the ldapsearch command-line tool, I am able to retrieve the certificate, as you can see below:
ldapsearch -h 192.xx.2.xx -D "CN=Administrator,CN=Users,DC=mmo,DC=co,DC=ca" -w Password -b "CN=rsa0,CN=Users,DC=mmo,DC=co,DC=ca" "userCertificate"
# extended LDIF
#
# LDAPv3
# base <CN=rsa0,CN=Users,DC=mmo,DC=co,DC=ca> with scope subtree
# filter: (objectclass=*)
# requesting: userCertificate
#
# rsa0, Users, mmo.co.ca
dn: CN=rsa0,CN=Users,DC=mmo,DC=co,DC=ca
userCertificate:: MIIDbTCCAlWgAwIBAgIEFbvHazANBgkqhkiG9w0BAQsFADBnMQswCQYDVQQG
EwJ1azEQMA4GA1UECBMHVW5rbm93bjEWMBQGA1UEBxMNcmlja21hbnN3b3J0aDERMA8GA1UEChMId
m9jYWxpbmsxDDAKBgNVBAsTA2lwczENMAsGA1UEAxMEcnNhMDAeFw0xOTExMjExNDUwNDNaFw0yOT
ExMTgxNDUwNDNaMGcxCzAJBgNVBAYTAnVrMRAwDgYDVQQIEwdVbmtub3duMRYwFAYDVQQHEw1yaWN
rbWFuc3dvcnRoMREwDwYDVQQKEwh2b2NhbGluazEMMAoGA1UECxMDaXBzMQ0wCwYDVQQDEwRyc2Ew
MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA0R0yCr0uU80oFG3Zg0vTbR4NSR2St+w4f
DOmoHQ27z1Q2JwhiNh1XkwC8MtVeGzRJw0pe+jXc2fMVbIqONHImOZuX6p1UMWof7fxMAIEfWq98u
OqVbvbXVLeCE9+BJGsOaiJ70Q76e8tDNTH3vg1orXAvb0O7R0Vz9I0iXjJKzUtmFEBju/m3eoa+WI
6OaBr64hJw7oz1CzPIKj0OcapFypFjr4+QKpRsHA4Nn21XrYSsT00Dk9SVK3NTjHm661crvTR6jSx
j1GrCpVdQGCQ25a2RrHIi0cmclNJmy81PngW0cpdO3p9ZsZ2vPUy5/CNbVwqPEPSlIjJtVa0Xf9O1
QIDAQABoyEwHzAdBgNVHQ4EFgQU1U7VOM/vAHL0pqZgi6TS1f0SAt8wDQYJKoZIhvcNAQELBQADgg
EBAC7fK81BHDbF8PSQO2YznZtnzCMs46TwCezyqIFzQljwYA5wxKIytV7GtV4aEUrfIFIeQIMW812
pMol9xIotULSl1I/2WI18QTIJfRAnkCZZPJIa9MU6nEGCouF1LwW9bzQzHOeI07NgCIyBryojXaxc
L/epJtVxYialdI9mBWB8KDytINrylOcP9sXYaUtkOOiU7h0sBF9XBfzXgtTkF8pB7ObX9YJnyvzTn
y2zVfeZD8Q7BtDL7AvIDcUjoHtYx5B0oD86aCNTSShmtB/ZEyqt8Kynqf+QUYQIWA3wVFjgZjCCwc
NxiXuf6H8KGW8hP+ETKnc7u9XP9GCHINf9K0I=
# search result
search: 2
result: 0 Success
# numResponses: 2
# numEntries: 1
However, when I try the Java program below, I am unable to retrieve it:
package CertStore;

import javax.naming.AuthenticationException;
import javax.naming.AuthenticationNotSupportedException;
import javax.naming.Context;
import javax.naming.NamingException;
import javax.naming.directory.DirContext;
import javax.naming.directory.InitialDirContext;
import javax.security.auth.x500.X500Principal;
import java.security.cert.*;
import java.util.*;
import java.io.*;

class CertStoreTest {
    CertStoreTest() {
        try {
            LDAPCertStoreParameters lcsp =
                    new LDAPCertStoreParameters("192.xx.2.xx", 389);
            String referenceID = "CN=rsa0,CN=Users,DC=bmo,DC=co,DC=ca";
            X509CertSelector xcs = new X509CertSelector();
            xcs.setSubject(referenceID);
            CertStore cs = CertStore.getInstance("LDAP", lcsp);
            Collection certificates = cs.getCertificates((CertSelector) xcs);
            System.out.println("size: " + certificates.size());
            Iterator certificate = certificates.iterator();
            while (certificate.hasNext()) {
                System.out.println(certificate.next());
            }
        } catch (Exception e) {
            e.printStackTrace();
        }
    }

    public static void main(String[] args) {
        System.out.println("main() called.");
        CertStoreTest test = new CertStoreTest();
    }
}
When I run this program, I get a size of 0 where I am expecting 1.
main() called.
size: 0
I also have OpenLDAP running on a Linux system, and if I point the above Java program at that server with the appropriate domain name information, Java is able to pull the certificate associated with that domain name.
I am not sure what I am missing when I try to retrieve the certificate from Windows Active Directory.
Can anyone shed some light on this? I have been stuck for a few days now.
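One thing worth trying while debugging (a sketch, not a confirmed fix): bypass CertStore and read the userCertificate attribute directly over JNDI with an authenticated simple bind, using the same bind DN and password that worked for ldapsearch. LDAPCertStoreParameters takes no credentials, so the LDAP CertStore searches anonymously, and Active Directory typically rejects anonymous searches; that would explain an empty result against AD but success against OpenLDAP. The host, DN, and password below are placeholders copied from the ldapsearch command:
import java.io.ByteArrayInputStream;
import java.security.cert.CertificateFactory;
import java.security.cert.X509Certificate;
import java.util.Hashtable;
import javax.naming.Context;
import javax.naming.directory.Attributes;
import javax.naming.directory.DirContext;
import javax.naming.directory.InitialDirContext;

public class LdapCertFetch {
    public static void main(String[] args) throws Exception {
        Hashtable<String, String> env = new Hashtable<String, String>();
        env.put(Context.INITIAL_CONTEXT_FACTORY, "com.sun.jndi.ldap.LdapCtxFactory");
        env.put(Context.PROVIDER_URL, "ldap://192.xx.2.xx:389");
        // Authenticated simple bind, same credentials as the ldapsearch command
        env.put(Context.SECURITY_AUTHENTICATION, "simple");
        env.put(Context.SECURITY_PRINCIPAL, "CN=Administrator,CN=Users,DC=mmo,DC=co,DC=ca");
        env.put(Context.SECURITY_CREDENTIALS, "Password");

        DirContext ctx = new InitialDirContext(env);
        Attributes attrs = ctx.getAttributes(
                "CN=rsa0,CN=Users,DC=mmo,DC=co,DC=ca",
                new String[] { "userCertificate" });
        byte[] der = (byte[]) attrs.get("userCertificate").get();

        // Decode the DER bytes into an X509Certificate
        CertificateFactory cf = CertificateFactory.getInstance("X.509");
        X509Certificate cert =
                (X509Certificate) cf.generateCertificate(new ByteArrayInputStream(der));
        System.out.println(cert.getSubjectX500Principal());
        ctx.close();
    }
}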

Updating module version when updating version in dependencies (multi-module Maven)

My problem: the versions-maven-plugin helps me bump the version of one module (let's call it A) in my multi-module Maven project.
Some modules (let's call them B and C) in this project have module A in their dependencies, and I need to bump the versions of those modules too. Sometimes I also need to bump the version of another module (B-parent) that has B (or C) in its dependencies (A version up -> B version up -> B-parent version up). Another problem is that the modules can be at different levels of nesting.
Example:
root:
---B-parent:
---B (A in dependencies)
---C-parent
---C (A in dependencies)
---A-parent:
---A
Process: A version up -> A-parent version up; C version up -> C-parent version up; B version up -> B-parent version up.
This plugin can't do this by itself.
Is there any idea how this can be done?
Or is my strategy of updating versions just not good enough?
I've made a script that increases version numbers in all dependent modules recursively with the versions-maven-plugin.
The algorithm is as follows:
1. Run versions:set in the target module.
2. Run versions:set in every module that was updated by versions:set in the previous step. If a module has already been processed, skip it.
3. Repeat step 2 until no new modules are updated.
Python 2.7 code
#!/usr/bin/env python
# -*- coding: utf-8 -*- #
# How To
#
# Run script and pass module path as a first argument.
# Or run it without arguments in module dir.
#
# Script will request the new version number for each module.
# If no version provided - last digit will be incremented (1.0.0 -> 1.0.1).
# cd <module-path>
# <project-dir>/increment-version.py
# ...
# review changes and commit
from subprocess import call, Popen, PIPE, check_output
import os
import re
import sys
getVersionCommand = "mvn org.apache.maven.plugins:maven-help-plugin:2.1.1:evaluate " \
                    "-Dexpression=project.version 2>/dev/null | grep -v '\['"

def getCurrentModuleVersion():
    return check_output(getVersionCommand, shell=True).decode("utf-8").split("\n")[0]

def incrementLastDigit(version):
    digits = version.split(".")
    lastDigit = int(digits[-1])
    digits[-1] = str(lastDigit + 1)
    return ".".join(digits)

def isUpdatedVersionInFile(version, file):
    return "<version>" + version + "</version>" in \
        check_output("git diff HEAD --no-ext-diff --unified=0 --exit-code -a --no-prefix {} "
                     "| egrep \"^\\+\"".format(file), shell=True).decode("utf-8")

def runVersionSet(version):
    process = Popen(["mvn", "versions:set", "-DnewVersion=" + version, "-DgenerateBackupPoms=false"], stdout=PIPE)
    (output, err) = process.communicate()
    exitCode = process.wait()
    if exitCode != 0:
        print "Error setting the version"
        exit(1)
    return output, err, exitCode

def addChangedPoms(version, dirsToVisit, visitedDirs):
    changedFiles = check_output(["git", "ls-files", "-m"]) \
        .decode("utf-8").split("\n")
    changedPoms = [f for f in changedFiles if f.endswith("pom.xml")]
    changedDirs = [os.path.dirname(os.path.abspath(f)) for f in changedPoms if isUpdatedVersionInFile(version, f)]
    changedDirs = [d for d in changedDirs if d not in visitedDirs and d not in dirsToVisit]
    print "New dirs to visit:", changedDirs
    return changedDirs

if __name__ == "__main__":
    visitedDirs = []
    dirsToVisit = []
    if len(sys.argv) > 1:
        if os.path.exists(os.path.join(sys.argv[1], "pom.xml")):
            dirsToVisit.append(os.path.abspath(sys.argv[1]))
        else:
            print "Error. No pom.xml file in dir", sys.argv[1]
            exit(1)
    else:
        dirsToVisit.append(os.path.abspath(os.getcwd()))
    pattern = re.compile("aggregation root: (.*)")
    while len(dirsToVisit) > 0:
        dirToVisit = dirsToVisit.pop()
        print "Visiting dir", dirToVisit
        os.chdir(dirToVisit)
        currentVersion = getCurrentModuleVersion()
        defaultVersion = incrementLastDigit(currentVersion)
        version = raw_input("New version for {}:{} ({}):".format(dirToVisit, currentVersion, defaultVersion))
        if not version.strip():
            version = defaultVersion
        print "New version:", version
        output, err, exitcode = runVersionSet(version)
        rootDir = pattern.search(output).group(1)
        visitedDirs = visitedDirs + [dirToVisit]
        os.chdir(rootDir)
        print "Adding new dirs to visit"
        dirsToVisit = dirsToVisit + addChangedPoms(version, dirsToVisit, visitedDirs)

Tensorflow in Android: How do I use my linear regression model to predict a value in an Android application?

I currently have an ipynb file (IPython notebook) that contains linear regression code / a model (I'm not entirely sure if it's a model) that I created earlier.
How do I implement this model in an Android application such that if I input a value of 'x' in a text box, it outputs the predicted value of 'y' in a TextView? Function: Y = mx + b.
I've tried looking at different tutorials, but they were mostly not "step-by-step" guides, which made them really hard to understand; I'm a beginner at coding.
Here's my code for the model:
import tensorflow as tf
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
rng = np.random
from numpy import genfromtxt
from sklearn.datasets import load_boston
# Parameters
learning_rate = 0.0001
training_epochs = 1000
display_step = 50
n_samples = 222
X = tf.placeholder("float") # create symbolic variables
Y = tf.placeholder("float")
filename_queue = tf.train.string_input_producer(["C://Users//Shiina//battdata.csv"],shuffle=False)
reader = tf.TextLineReader() # skip_header_lines=1 if csv has headers
key, value = reader.read(filename_queue)
# Default values, in case of empty columns. Also specifies the type of the
# decoded result.
record_defaults = [[1.], [1.]]
height, soc = tf.decode_csv(
    value, record_defaults=record_defaults)
features = tf.stack([height])
# Set model weights
W = tf.Variable(rng.randn(), name="weight")
b = tf.Variable(rng.randn(), name="bias")
# Construct a linear model
pred_soc = tf.add(tf.multiply(height, W), b) # XW + b <- y = mx + b where W is gradient, b is intercept
# Mean squared error
cost = tf.reduce_sum(tf.pow(pred_soc-soc, 2))/(2*n_samples)
# Gradient descent
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)
# Initializing the variables
init = tf.global_variables_initializer()
with tf.Session() as sess:
    # Start populating the filename queue.
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(coord=coord)
    sess.run(init)
    # Fit all training data
    for epoch in range(training_epochs):
        _, cost_value = sess.run([optimizer, cost])
        # Display logs per epoch step
        if (epoch + 1) % display_step == 0:
            c = sess.run(cost)
            print("Epoch:", '%04d' % (epoch + 1), "cost=", "{:.9f}".format(c),
                  "W=", sess.run(W), "b=", sess.run(b))
    print("Optimization Finished!")
    training_cost = sess.run(cost)
    print("Training cost=", training_cost, "W=", sess.run(W), "b=", sess.run(b), '\n')
    # Plot data after completing training
    train_X = []
    train_Y = []
    for i in range(n_samples):  # Your input data size to loop through once
        X, Y = sess.run([height, pred_soc])  # Call pred, to get the prediction with the updated weights
        train_X.append(X)
        train_Y.append(Y)
    # Graphic display
    plt.plot(train_X, train_Y, 'ro', label='Original data')
    plt.ylabel("SoC")
    plt.xlabel("Height")
    plt.axis([0, 1, 0, 100])
    plt.plot(train_X, train_Y, linewidth=2.0)
    plt.legend()
    plt.show()
    coord.request_stop()
    coord.join(threads)
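For what it's worth, since the trained model here boils down to two scalars (the W and b printed by the training loop), one simple route is to read those two numbers off the training output and hard-code them in the app, with no TensorFlow on the phone at all. A minimal Android-flavored sketch; the weight values and view ids below are hypothetical placeholders, not from the question:
// Inside an Activity: read x from an EditText, show y = W*x + b in a TextView.
// W and B are placeholders; substitute the values printed by your training loop
// ("W=", "b=" in the epoch logs). The view ids are hypothetical.
private static final float W = 0.5f;  // slope (placeholder)
private static final float B = 0.1f;  // intercept (placeholder)

private void predict() {
    EditText input = (EditText) findViewById(R.id.inputX);
    TextView output = (TextView) findViewById(R.id.outputY);
    float x = Float.parseFloat(input.getText().toString());
    float y = W * x + B;  // the linear model: Y = mx + b
    output.setText(String.valueOf(y));
}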

Re-using GAE Python code with GAE Java

Here's [Python code][1] that I would like to know whether it can also be used with GAE Java (once the code is migrated). So the question is: can the Python code below be converted to Java without any Python "dependencies" that Java can't have?
# stdlib
from collections import defaultdict
from datetime import datetime, timedelta
import os
import time
# 3p
import simplejson as json
# google api
from google.appengine.api import app_identity, logservice, memcache, taskqueue
from google.appengine.ext.db import stats as db_stats
# framework
import webapp2
class DatadogStats(webapp2.RequestHandler):
    def get(self):
        api_key = self.request.get('api_key')
        if api_key != os.environ.get('DATADOG_API_KEY'):
            self.abort(403)

        FLAVORS = ['requests', 'services', 'all']
        flavor = self.request.get('flavor')
        if flavor not in FLAVORS:
            self.abort(400)

        def get_task_queue_stats(queues=None):
            if queues is None:
                queues = ['default']
            else:
                queues = queues.split(',')
            task_queues = [taskqueue.Queue(q).fetch_statistics() for q in queues]
            q_stats = []
            for q in task_queues:
                stats = {
                    'queue_name': q.queue.name,
                    'tasks': q.tasks,
                    'oldest_eta_usec': q.oldest_eta_usec,
                    'executed_last_minute': q.executed_last_minute,
                    'in_flight': q.in_flight,
                    'enforced_rate': q.enforced_rate,
                }
                q_stats.append(stats)
            return q_stats

        def get_request_stats(after=None):
            if after is None:
                one_minute_ago = datetime.utcnow() - timedelta(minutes=1)
                after = time.mktime(one_minute_ago.timetuple())
            else:
                # cast to float
                after = float(after)
            logs = logservice.fetch(start_time=after)
            stats = defaultdict(list)
            for req_log in logs:
                stats['start_time'].append(req_log.start_time)
                stats['api_mcycles'].append(req_log.api_mcycles)
                stats['cost'].append(req_log.cost)
                stats['finished'].append(req_log.finished)
                stats['latency'].append(req_log.latency)
                stats['mcycles'].append(req_log.mcycles)
                stats['pending_time'].append(req_log.pending_time)
                stats['replica_index'].append(req_log.replica_index)
                stats['response_size'].append(req_log.response_size)
                stats['version_id'].append(req_log.version_id)
            return stats

        stats = {
            'project_name': app_identity.get_application_id()
        }
        if flavor == 'services' or flavor == 'all':
            stats['datastore'] = db_stats.GlobalStat.all().get()
            stats['memcache'] = memcache.get_stats()
            stats['task_queue'] = get_task_queue_stats(self.request.get('task_queues', None))
        if flavor == 'requests' or flavor == 'all':
            stats['requests'] = get_request_stats(self.request.get('after', None))

        self.response.headers['Content-Type'] = 'application/json'
        self.response.write(json.dumps(stats))


app = webapp2.WSGIApplication([
    ('/datadog', DatadogStats),
])
[1]: https://github.com/DataDog/gae_datadog/blob/master/datadog.py
Yes, the code can be converted and will work in Java, but you will have to do it manually (I don't know of any tools to "translate" from Python to Java).
Looking at all the imports you have, there's nothing there that can't be used in Java.
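For orientation, here is a rough sketch of the Java-side equivalents of the main services used above, assuming the standard App Engine Java SDK (logservice maps to com.google.appengine.api.log.LogService, and datastore stats are exposed as special __Stat_Total__ entities queried through the Datastore API; double-check method names against the javadoc):
// Sketch: inside a servlet's doGet(), gather the same stats the Python handler reads.
import com.google.appengine.api.memcache.MemcacheServiceFactory;
import com.google.appengine.api.memcache.Stats;
import com.google.appengine.api.taskqueue.QueueFactory;
import com.google.appengine.api.taskqueue.QueueStatistics;
import com.google.appengine.api.utils.SystemProperty;

public class DatadogStatsServlet extends javax.servlet.http.HttpServlet {
    @Override
    protected void doGet(javax.servlet.http.HttpServletRequest req,
                         javax.servlet.http.HttpServletResponse resp)
            throws java.io.IOException {
        // app_identity.get_application_id()
        String appId = SystemProperty.applicationId.get();
        // memcache.get_stats()
        Stats mc = MemcacheServiceFactory.getMemcacheService().getStatistics();
        // taskqueue.Queue(q).fetch_statistics()
        QueueStatistics q = QueueFactory.getQueue("default").fetchStatistics();

        resp.setContentType("application/json");
        resp.getWriter().write("{\"project_name\": \"" + appId
                + "\", \"memcache_items\": " + mc.getItemCount()
                + ", \"queue_tasks\": " + q.getNumTasks() + "}");
    }
}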
