From a6b846a0b6d53cde8180ced61ad32e26107412e8 Mon Sep 17 00:00:00 2001
From: ravenscroftj
Date: Mon, 19 Dec 2022 14:30:05 +0000
Subject: [PATCH] Add 'brainsteam/content/annotations/2022/12/19/1671459633.md'

---
 .../annotations/2022/12/19/1671459633.md      | 62 +++++++++++++++++++
 1 file changed, 62 insertions(+)
 create mode 100644 brainsteam/content/annotations/2022/12/19/1671459633.md

diff --git a/brainsteam/content/annotations/2022/12/19/1671459633.md b/brainsteam/content/annotations/2022/12/19/1671459633.md
new file mode 100644
index 0000000..f52d4a6
--- /dev/null
+++ b/brainsteam/content/annotations/2022/12/19/1671459633.md
@@ -0,0 +1,62 @@
+---
+date: '2022-12-19T14:20:33'
+hypothesis-meta:
+  created: '2022-12-19T14:20:33.068063+00:00'
+  document:
+    title:
+    - My AI Safety Lecture for UT Effective Altruism
+  flagged: false
+  group: __world__
+  hidden: false
+  id: TGVxKn-oEe2vUGtB_ufnbw
+  links:
+    html: https://hypothes.is/a/TGVxKn-oEe2vUGtB_ufnbw
+    incontext: https://hyp.is/TGVxKn-oEe2vUGtB_ufnbw/scottaaronson.blog/?p=6823
+    json: https://hypothes.is/api/annotations/TGVxKn-oEe2vUGtB_ufnbw
+  permissions:
+    admin:
+    - acct:ravenscroftj@hypothes.is
+    delete:
+    - acct:ravenscroftj@hypothes.is
+    read:
+    - group:__world__
+    update:
+    - acct:ravenscroftj@hypothes.is
+  tags:
+  - ai
+  - nlproc
+  target:
+  - selector:
+    - endContainer: /div[2]/div[2]/div[2]/div[1]/p[49]
+      endOffset: 48
+      startContainer: /div[2]/div[2]/div[2]/div[1]/p[49]
+      startOffset: 33
+      type: RangeSelector
+    - end: 19549
+      start: 19534
+      type: TextPositionSelector
+    - exact: " \u201CAI alignment\u201D"
+      prefix: t the other end of the spectrum,
+      suffix: ' is where you believe that reall'
+      type: TextQuoteSelector
+    source: https://scottaaronson.blog/?p=6823
+  text: AI Alignment is the Terminator situation. This contrasts with AI Ethics,
+    which is more the concern around current models being racist, etc.
+  updated: '2022-12-19T14:20:33.068063+00:00'
+  uri: https://scottaaronson.blog/?p=6823
+  user: acct:ravenscroftj@hypothes.is
+  user_info:
+    display_name: James Ravenscroft
+in-reply-to: https://scottaaronson.blog/?p=6823
+tags:
+- ai
+- nlproc
+- hypothesis
+type: annotation
+url: /annotations/2022/12/19/1671459633
+
+---
+
+
+“AI alignment”
+AI Alignment is the Terminator situation. This contrasts with AI Ethics, which is more the concern around current models being racist, etc.
\ No newline at end of file