Commit 8d9148c

Prep 0.7.0 release (#361)
1 parent 412eb2d commit 8d9148c

11 files changed (+79, -26 lines)

README.md

Lines changed: 1 addition & 1 deletion
```diff
@@ -39,7 +39,7 @@
     <tbody align="center">
       <tr>
         <td >2.3.*</td>
-        <td rowspan=5><a href="https://github.com/dotnet/spark/releases/tag/v0.6.0">v0.6.0</a></td>
+        <td rowspan=5><a href="https://github.com/dotnet/spark/releases/tag/v0.7.0">v0.7.0</a></td>
       </tr>
       <tr>
         <td>2.4.0</td>
```

benchmark/scala/pom.xml

Lines changed: 1 addition & 1 deletion
```diff
@@ -3,7 +3,7 @@
   <modelVersion>4.0.0</modelVersion>
   <groupId>com.microsoft.spark</groupId>
   <artifactId>microsoft-spark-benchmark</artifactId>
-  <version>0.6.0</version>
+  <version>0.7.0</version>
   <inceptionYear>2019</inceptionYear>
   <properties>
     <encoding>UTF-8</encoding>
```

docs/building/ubuntu-instructions.md

Lines changed: 4 additions & 4 deletions
```diff
@@ -184,15 +184,15 @@ Once you build the samples, you can use `spark-submit` to submit your .NET Core
    spark-submit \
    --class org.apache.spark.deploy.dotnet.DotnetRunner \
    --master local \
-   ~/dotnet.spark/src/scala/microsoft-spark-2.3.x/target/microsoft-spark-2.3.x-0.6.0.jar \
+   ~/dotnet.spark/src/scala/microsoft-spark-<version>/target/microsoft-spark-<version>.jar \
    Microsoft.Spark.CSharp.Examples Sql.Batch.Basic $SPARK_HOME/examples/src/main/resources/people.json
    ```
 - **[Microsoft.Spark.Examples.Sql.Streaming.StructuredNetworkWordCount](../../examples/Microsoft.Spark.CSharp.Examples/Sql/Streaming/StructuredNetworkWordCount.cs)**
    ```bash
    spark-submit \
    --class org.apache.spark.deploy.dotnet.DotnetRunner \
    --master local \
-   ~/dotnet.spark/src/scala/microsoft-spark-2.3.x/target/microsoft-spark-2.3.x-0.6.0.jar \
+   ~/dotnet.spark/src/scala/microsoft-spark-<version>/target/microsoft-spark-<version>.jar \
    Microsoft.Spark.CSharp.Examples Sql.Streaming.StructuredNetworkWordCount localhost 9999
    ```
 - **[Microsoft.Spark.Examples.Sql.Streaming.StructuredKafkaWordCount (maven accessible)](../../examples/Microsoft.Spark.CSharp.Examples/Sql/Streaming/StructuredKafkaWordCount.cs)**
@@ -201,7 +201,7 @@ Once you build the samples, you can use `spark-submit` to submit your .NET Core
    --packages org.apache.spark:spark-sql-kafka-0-10_2.11:2.3.2 \
    --class org.apache.spark.deploy.dotnet.DotnetRunner \
    --master local \
-   ~/dotnet.spark/src/scala/microsoft-spark-2.3.x/target/microsoft-spark-2.3.x-0.6.0.jar \
+   ~/dotnet.spark/src/scala/microsoft-spark-<version>/target/microsoft-spark-<version>.jar \
    Microsoft.Spark.CSharp.Examples Sql.Streaming.StructuredKafkaWordCount localhost:9092 subscribe test
    ```
 - **[Microsoft.Spark.Examples.Sql.Streaming.StructuredKafkaWordCount (jars provided)](../../examples/Microsoft.Spark.CSharp.Examples/Sql/Streaming/StructuredKafkaWordCount.cs)**
@@ -210,7 +210,7 @@ Once you build the samples, you can use `spark-submit` to submit your .NET Core
    --jars path/to/net.jpountz.lz4/lz4-1.3.0.jar,path/to/org.apache.kafka/kafka-clients-0.10.0.1.jar,path/to/org.apache.spark/spark-sql-kafka-0-10_2.11-2.3.2.jar,path/to/org.slf4j/slf4j-api-1.7.6.jar,path/to/org.spark-project.spark/unused-1.0.0.jar,path/to/org.xerial.snappy/snappy-java-1.1.2.6.jar \
    --class org.apache.spark.deploy.dotnet.DotnetRunner \
    --master local \
-   ~/dotnet.spark/src/scala/microsoft-spark-2.3.x/target/microsoft-spark-2.3.x-0.6.0.jar \
+   ~/dotnet.spark/src/scala/microsoft-spark-<version>/target/microsoft-spark-<version>.jar \
    Microsoft.Spark.CSharp.Examples Sql.Streaming.StructuredKafkaWordCount localhost:9092 subscribe test
    ```
```
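To make the new placeholder concrete, here is a minimal sketch of the first command with `<version>` resolved for this release, assuming a Spark 2.4.x build and the checkout path used in the docs above:

```bash
# Hypothetical substitution of <version> for the 0.7.0 release; adjust the
# microsoft-spark-2.4.x segment to match your Spark version and checkout path.
spark-submit \
  --class org.apache.spark.deploy.dotnet.DotnetRunner \
  --master local \
  ~/dotnet.spark/src/scala/microsoft-spark-2.4.x/target/microsoft-spark-2.4.x-0.7.0.jar \
  Microsoft.Spark.CSharp.Examples Sql.Batch.Basic $SPARK_HOME/examples/src/main/resources/people.json
```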

docs/building/windows-instructions.md

Lines changed: 4 additions & 4 deletions
```diff
@@ -216,15 +216,15 @@ Once you build the samples, running them will be through `spark-submit` regardle
    spark-submit.cmd `
    --class org.apache.spark.deploy.dotnet.DotnetRunner `
    --master local `
-   C:\github\dotnet-spark\src\scala\microsoft-spark-2.3.x\target\microsoft-spark-2.3.x-0.6.0.jar `
+   C:\github\dotnet-spark\src\scala\microsoft-spark-<version>\target\microsoft-spark-<version>.jar `
    Microsoft.Spark.CSharp.Examples.exe Sql.Batch.Basic %SPARK_HOME%\examples\src\main\resources\people.json
    ```
 - **[Microsoft.Spark.Examples.Sql.Streaming.StructuredNetworkWordCount](../../examples/Microsoft.Spark.CSharp.Examples/Sql/Streaming/StructuredNetworkWordCount.cs)**
    ```powershell
    spark-submit.cmd `
    --class org.apache.spark.deploy.dotnet.DotnetRunner `
    --master local `
-   C:\github\dotnet-spark\src\scala\microsoft-spark-2.3.x\target\microsoft-spark-2.3.x-0.6.0.jar `
+   C:\github\dotnet-spark\src\scala\microsoft-spark-<version>\target\microsoft-spark-<version>.jar `
    Microsoft.Spark.CSharp.Examples.exe Sql.Streaming.StructuredNetworkWordCount localhost 9999
    ```
 - **[Microsoft.Spark.Examples.Sql.Streaming.StructuredKafkaWordCount (maven accessible)](../../examples/Microsoft.Spark.CSharp.Examples/Sql/Streaming/StructuredKafkaWordCount.cs)**
@@ -233,7 +233,7 @@ Once you build the samples, running them will be through `spark-submit` regardle
    --packages org.apache.spark:spark-sql-kafka-0-10_2.11:2.3.2 `
    --class org.apache.spark.deploy.dotnet.DotnetRunner `
    --master local `
-   C:\github\dotnet-spark\src\scala\microsoft-spark-2.3.x\target\microsoft-spark-2.3.x-0.6.0.jar `
+   C:\github\dotnet-spark\src\scala\microsoft-spark-<version>\target\microsoft-spark-<version>.jar `
    Microsoft.Spark.CSharp.Examples.exe Sql.Streaming.StructuredKafkaWordCount localhost:9092 subscribe test
    ```
 - **[Microsoft.Spark.Examples.Sql.Streaming.StructuredKafkaWordCount (jars provided)](../../examples/Microsoft.Spark.CSharp.Examples/Sql/Streaming/StructuredKafkaWordCount.cs)**
@@ -242,7 +242,7 @@ Once you build the samples, running them will be through `spark-submit` regardle
    --jars path\to\net.jpountz.lz4\lz4-1.3.0.jar,path\to\org.apache.kafka\kafka-clients-0.10.0.1.jar,path\to\org.apache.spark\spark-sql-kafka-0-10_2.11-2.3.2.jar,path\to\org.slf4j\slf4j-api-1.7.6.jar,path\to\org.spark-project.spark\unused-1.0.0.jar,path\to\org.xerial.snappy\snappy-java-1.1.2.6.jar `
    --class org.apache.spark.deploy.dotnet.DotnetRunner `
    --master local `
-   C:\github\dotnet-spark\src\scala\microsoft-spark-2.3.x\target\microsoft-spark-2.3.x-0.6.0.jar `
+   C:\github\dotnet-spark\src\scala\microsoft-spark-<version>\target\microsoft-spark-<version>.jar `
    Microsoft.Spark.CSharp.Examples.exe Sql.Streaming.StructuredKafkaWordCount localhost:9092 subscribe test
    ```
```

docs/deploy-worker-udf-binaries.md

Lines changed: 9 additions & 10 deletions
```diff
@@ -16,7 +16,7 @@ When deploying workers and writing UDFs, there are a few commonly used environme
   </tr>
   <tr>
     <td><b>DOTNET_WORKER_DIR</b></td>
-    <td>Path where the <code>Microsoft.Spark.Worker</code> binary has been generated.</br>It's used by the Spark driver and will be passed to Spark executors. If this variable is not set up, the Spark executors will search the path specified in the <code>PATH</code> environment variable.</br><i>e.g. "C:\bin\Microsoft.Spark.Worker-0.6.0"</i></td>
+    <td>Path where the <code>Microsoft.Spark.Worker</code> binary has been generated.</br>It's used by the Spark driver and will be passed to Spark executors. If this variable is not set up, the Spark executors will search the path specified in the <code>PATH</code> environment variable.</br><i>e.g. "C:\bin\Microsoft.Spark.Worker"</i></td>
   </tr>
   <tr>
     <td><b>DOTNET_ASSEMBLY_SEARCH_PATHS</b></td>
@@ -50,7 +50,7 @@ Once the Spark application is [bundled](https://spark.apache.org/docs/latest/sub
   </tr>
   <tr>
     <td><b>--conf</b></td>
-    <td>Arbitrary Spark configuration property in <code>key=value</code> format.</br><i>e.g. spark.yarn.appMasterEnv.DOTNET_WORKER_DIR=.\worker\Microsoft.Spark.Worker-0.6.0</i></td>
+    <td>Arbitrary Spark configuration property in <code>key=value</code> format.</br><i>e.g. spark.yarn.appMasterEnv.DOTNET_WORKER_DIR=.\worker\Microsoft.Spark.Worker</i></td>
   </tr>
   <tr>
     <td><b>--files</b></td>
@@ -68,12 +68,12 @@ Once the Spark application is [bundled](https://spark.apache.org/docs/latest/sub
     <li>Please note that this option is only applicable for yarn mode.</li>
     <li>It supports specifying file names with # similar to Hadoop.</br>
     </ul>
-    <i>e.g. <code>hdfs://&lt;path to your worker file&gt;/Microsoft.Spark.Worker.net461.win-x64-0.6.0.zip#worker</code>. This will copy and extract the zip file to <code>worker</code> folder.</i></li></td>
+    <i>e.g. <code>hdfs://&lt;path to your worker file&gt;/Microsoft.Spark.Worker.zip#worker</code>. This will copy and extract the zip file to <code>worker</code> folder.</i></li></td>
   </tr>
   <tr>
     <td><b>application-jar</b></td>
     <td>Path to a bundled jar including your application and all dependencies.</br>
-    <i>e.g. hdfs://&lt;path to your jar&gt;/microsoft-spark-2.4.x-0.6.0.jar</i></td>
+    <i>e.g. hdfs://&lt;path to your jar&gt;/microsoft-spark-&lt;version&gt;.jar</i></td>
   </tr>
   <tr>
     <td><b>application-arguments</b></td>
@@ -95,10 +95,9 @@ Once the Spark application is [bundled](https://spark.apache.org/docs/latest/sub
 **Answer:** Please try restarting your PowerShell window (or other command windows) first so that it can take the latest environment variable values. Then start your program.
 
 #### 3. Question: After submitting my Spark application, I get the error `System.TypeLoadException: Could not load type 'System.Runtime.Remoting.Contexts.Context'`.
-> **Command:** %SPARK_HOME%\bin\spark-submit --class org.apache.spark.deploy.dotnet.DotnetRunner --master local microsoft-spark-2.4.x-0.6.0.jar mySparkApp.exe</br>
-**Error:** [ ] [ ] [Error] [TaskRunner] [0] ProcessStream() failed with exception: System.TypeLoadException: Could not load type 'System.Runtime.Remoting.Contexts.Context' from assembly 'mscorlib, Version=4.0.0.0, Culture=neutral, PublicKeyToken=...'.
+> **Error:** [ ] [ ] [Error] [TaskRunner] [0] ProcessStream() failed with exception: System.TypeLoadException: Could not load type 'System.Runtime.Remoting.Contexts.Context' from assembly 'mscorlib, Version=4.0.0.0, Culture=neutral, PublicKeyToken=...'.
 
-**Answer:** Please check the `Microsoft.Spark.Worker` version you are using. We currently provide two versions: **.NET Framework 4.6.1** and **.NET Core 2.1.x**. In this case, `Microsoft.Spark.Worker.net461.win-x64-0.6.0` (which you can download [here](https://github.com/dotnet/spark/releases)) should be used since `System.Runtime.Remoting.Contexts.Context` is only for .NET Framework.
+**Answer:** Please check the `Microsoft.Spark.Worker` version you are using. We currently provide two versions: **.NET Framework 4.6.1** and **.NET Core 2.1.x**. In this case, `Microsoft.Spark.Worker.net461.win-x64-<version>` (which you can download [here](https://github.com/dotnet/spark/releases)) should be used since `System.Runtime.Remoting.Contexts.Context` is only for .NET Framework.
 
 #### 4. Question: How to run my spark application with UDFs on YARN? Which environment variables and parameters should I use?
@@ -108,9 +107,9 @@ spark-submit \
 --class org.apache.spark.deploy.dotnet.DotnetRunner \
 --master yarn \
 --deploy-mode cluster \
---conf spark.yarn.appMasterEnv.DOTNET_WORKER_DIR=./worker/Microsoft.Spark.Worker-0.6.0 \
+--conf spark.yarn.appMasterEnv.DOTNET_WORKER_DIR=./worker/Microsoft.Spark.Worker-<version> \
 --conf spark.yarn.appMasterEnv.DOTNET_ASSEMBLY_SEARCH_PATHS=./udfs \
---archives hdfs://<path to your files>/Microsoft.Spark.Worker.net461.win-x64-0.6.0.zip#worker,hdfs://<path to your files>/mySparkApp.zip#udfs \
-hdfs://<path to jar file>/microsoft-spark-2.4.x-0.6.0.jar \
+--archives hdfs://<path to your files>/Microsoft.Spark.Worker.net461.win-x64-<version>.zip#worker,hdfs://<path to your files>/mySparkApp.zip#udfs \
+hdfs://<path to jar file>/microsoft-spark-2.4.x-<version>.jar \
 hdfs://<path to your files>/mySparkApp.zip mySparkApp
 ```
```
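As a concrete companion to the `DOTNET_WORKER_DIR` guidance being edited above, a minimal sketch for a local Linux session; the install path is an assumed example, not a path the docs prescribe:

```bash
# Assumed example path: point DOTNET_WORKER_DIR at the folder where you
# extracted the Microsoft.Spark.Worker release, then run spark-submit as usual.
export DOTNET_WORKER_DIR=~/bin/Microsoft.Spark.Worker-0.7.0
```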

docs/release-notes/0.7/release-0.7.md

Lines changed: 55 additions & 0 deletions
```diff
@@ -0,0 +1,55 @@
+# .NET for Apache Spark 0.7 Release Notes
+
+### New Features and Improvements
+
+* Expose `SparkContext.SetLogLevel()` to change the log level programmatically ([#360](https://github.com/dotnet/spark/pull/360))
+* Expose `Schema()` for `DataFrameReader` and `DataStreamReader` ([#248](https://github.com/dotnet/spark/pull/248))
+* Expose `Column.Apply()` ([#323](https://github.com/dotnet/spark/pull/323))
+* The "Deprecated" annotation is added to the following functions:
+    * `WindowSpec.RangeBetween(Column start, Column end)`
+    * `Window.RangeBetween(Column start, Column end)`
+    * `Functions.UnboundedPreceding()`
+    * `Functions.UnboundedFollowing()`
+    * `Functions.CurrentRow()`
+
+### Breaking Changes
+* The following APIs have been removed due to their dependency on a thread-local variable (see [#332](https://github.com/dotnet/spark/pull/332) and [#333](https://github.com/dotnet/spark/issues/333) for more detail):
+    * `SparkSession.ClearActiveSession()`
+    * `SparkSession.GetActiveSession()`
+    * `SparkSession.SetActiveSession()`
+
+### Supported Spark Versions
+
+The following table outlines the supported Spark versions along with the microsoft-spark JAR to use with them:
+
+<table>
+  <thead>
+    <tr>
+      <th>Spark Version</th>
+      <th>microsoft-spark JAR</th>
+    </tr>
+  </thead>
+  <tbody align="center">
+    <tr>
+      <td>2.3.*</td>
+      <td>microsoft-spark-2.3.x-0.7.0.jar</td>
+    </tr>
+    <tr>
+      <td>2.4.0</td>
+      <td rowspan=4>microsoft-spark-2.4.x-0.7.0.jar</td>
+    </tr>
+    <tr>
+      <td>2.4.1</td>
+    </tr>
+    <tr>
+      <td>2.4.3</td>
+    </tr>
+    <tr>
+      <td>2.4.4</td>
+    </tr>
+    <tr>
+      <td>2.4.2</td>
+      <td><a href="https://github.com/dotnet/spark/issues/60">Not supported</a></td>
+    </tr>
+  </tbody>
+</table>
```
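To illustrate the version table these notes add, a minimal sketch of submitting against Spark 2.4.4 with the matching JAR; the application name and jar location are assumed placeholders, not part of the release notes:

```bash
# Assumed illustration: per the table above, Spark 2.4.0-2.4.4 (except 2.4.2)
# pairs with the 2.4.x JAR at this release. MySparkApp is a placeholder
# executable name for your published .NET app.
spark-submit \
  --class org.apache.spark.deploy.dotnet.DotnetRunner \
  --master local \
  microsoft-spark-2.4.x-0.7.0.jar \
  MySparkApp
```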

eng/Versions.props

Lines changed: 1 addition & 1 deletion
```diff
@@ -1,7 +1,7 @@
 <?xml version="1.0" encoding="utf-8"?>
 <Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
   <PropertyGroup>
-    <VersionPrefix>0.6.0</VersionPrefix>
+    <VersionPrefix>0.7.0</VersionPrefix>
     <PreReleaseVersionLabel>prerelease</PreReleaseVersionLabel>
     <RestoreSources>
       $(RestoreSources);
```
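The `VersionPrefix` bumped here is what flows into the NuGet package version. As a rough sketch of checking that locally (the artifacts layout is an assumption about the repo's Arcade-style build, not something this diff specifies):

```bash
# Sketch only: build the .NET bindings, then look for the bumped package
# version in the build output; the artifacts path is an assumed default.
dotnet build src/csharp/Microsoft.Spark.sln
find artifacts -name 'Microsoft.Spark.0.7.0-prerelease*.nupkg'
```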

src/scala/microsoft-spark-2.3.x/pom.xml

Lines changed: 1 addition & 1 deletion
```diff
@@ -4,7 +4,7 @@
   <parent>
     <groupId>com.microsoft.scala</groupId>
     <artifactId>microsoft-spark</artifactId>
-    <version>0.6.0</version>
+    <version>0.7.0</version>
   </parent>
   <artifactId>microsoft-spark-2.3.x</artifactId>
   <inceptionYear>2019</inceptionYear>
```

src/scala/microsoft-spark-2.4.x/pom.xml

Lines changed: 1 addition & 1 deletion
```diff
@@ -4,7 +4,7 @@
   <parent>
     <groupId>com.microsoft.scala</groupId>
     <artifactId>microsoft-spark</artifactId>
-    <version>0.6.0</version>
+    <version>0.7.0</version>
   </parent>
   <artifactId>microsoft-spark-2.4.x</artifactId>
   <inceptionYear>2019</inceptionYear>
```

src/scala/microsoft-spark-3.0.x/pom.xml

Lines changed: 1 addition & 1 deletion
```diff
@@ -4,7 +4,7 @@
   <parent>
     <groupId>com.microsoft.scala</groupId>
     <artifactId>microsoft-spark</artifactId>
-    <version>0.6.0</version>
+    <version>0.7.0</version>
   </parent>
   <artifactId>microsoft-spark-3.0.x</artifactId>
   <inceptionYear>2019</inceptionYear>
```

src/scala/pom.xml

Lines changed: 1 addition & 2 deletions
```diff
@@ -4,15 +4,14 @@
   <groupId>com.microsoft.scala</groupId>
   <artifactId>microsoft-spark</artifactId>
   <packaging>pom</packaging>
-  <version>0.6.0</version>
+  <version>0.7.0</version>
   <properties>
     <encoding>UTF-8</encoding>
   </properties>
 
   <modules>
     <module>microsoft-spark-2.3.x</module>
     <module>microsoft-spark-2.4.x</module>
-    <module>microsoft-spark-3.0.x</module>
   </modules>
 
   <pluginRepositories>
```
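After a parent-POM version bump like this, the Scala JARs are rebuilt with Maven; a minimal sketch, assuming Maven is installed and you start from the repo root (this mirrors the build flow in the docs, not a step the diff itself adds):

```bash
# Rebuild the microsoft-spark JARs with the bumped 0.7.0 version; the
# target/ output paths match the spark-submit examples earlier in this commit.
cd src/scala
mvn clean package
```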
