# NOTE: our .zprofile is expensive, so we keep track of what has been run already,
# and only set up what is necessary. additionally, we want to ensure that our
# environment is set up as early as possible, so we also source .zprofile in
# .zshenv for new non-login shells.
#
# this approach addresses the following issues:
# * the parent shell that starts the user's session after logging in to some
#   graphical environments may not be a login shell—due to misconfiguration
#   or otherwise—which means .zprofile is not run and the environment is not
#   properly configured for any child processes.
# * some desktop environments/graphical terminal emulators will start new
#   terminal windows with login shells, which runs .zprofile every time and
#   leads to noticeably slow startup times.
# * switching users without wiping the environment will result in paths and
#   variables intended for the old user being used for the new user. while
#   this may be considered an edge-case that should not be supported, there
#   are legitimate reasons to want to do this, and in any case the shell
#   should not choke or cause unexpected problems should it happen anyway.

### lang
export CHARSET=UTF-8
export LANG=en_US.UTF-8
# keep the ctype category in sync with the primary locale
export LC_CTYPE=$LANG
### path
# NOTE: we utilize the fact that unique arrays keep the first occurrence and
#       remove any further occurrences to capture elements from the old PATH
#       that we did not anticipate and shift them to the front, since they are
#       probably important to the system
if [[ ! -v _sev_setup_path || -o login ]] {
    typeset -U path fpath
    # add as many generic paths as possible to keep the order we want
    # NOTE: /usr/{local,pkg,games} are unix/bsdisms
    syspath=("$path[@]")
    path=({~,~/.local,,/usr,/opt,/usr/local,/usr/pkg}/sbin
          {~,~/.local,,/usr,/opt,/usr/local,/usr/pkg}/bin
          /usr/X11R{7,6}/bin /usr/games)
    # len counts our generic paths; kept up to date while pruning below
    ((len=$#path))
    path=("$path[@]" "$syspath[@]")
    # remove nonexistent and duplicate paths
    for (( i = 1; i <= $#path; i++ )) {
        if [[ ! -e $path[$i] ]] {
            # assigning () deletes the element and shifts the rest down, so
            # compensate the index and, if a generic path died, the count
            path[$i]=()
            ((i <= len)) && ((len--))
            ((i--))
            continue
        }
    }
    # shift valid system paths to the front if there are any left
    ((len > 0 && len < $#path)) && path=("${(@)path[len + 1, -1]}" "${(@)path[1, len]}")
    unset syspath len i
    # include our zsh dir in fpath. unlike above, we always prefer our paths
    fpath=(${ZDOTDIR:-~/.zsh}/functions/{*,Completions/*}(N) "$fpath[@]")
    # FPATH is not exported by default
    export FPATH
    typeset +U path fpath
    export _sev_setup_path=
}

### temp
# NOTE: it's intentional to separate POSIX tmp for each session (spec says
#       programs should not expect data there to be long-lived) and to keep the
#       same runtime dir and not create a new one if a new login shell is
#       spawned, since the XDG spec calls for the same dir to be utilized for
#       each "session".
if [[ ! -v _sev_setup_tmp ]] {
    tmpd=${TMPDIR:-${TEMP:-${TMP:-/tmp}}}/.home-$LOGNAME
    homed=~/tmp
    if [[ ! -e $tmpd ]] { mkdir -pm700 $tmpd 2>/dev/null }
    if [[ ! -d $tmpd ]] {
        [[ -o interactive ]] &&
            print -P "%F{red}!!! Can't create temp dir $tmpd%f"
        # fall back to a bare directory under the home dir
        [[ -h $homed ]] && unlink $homed 2>/dev/null
        [[ ! -e $homed ]] && mkdir -m700 $homed 2>/dev/null
    }
    # [re-]create the link from the home dir to our tmp
    [[ -h $homed || ! -e $homed ]] && ln -sfn $tmpd $homed 2>/dev/null
    # finally create our subdir for this session
    export _sev_tmp=$homed/.session.$$
    # ensure the session dir doesn't already exist. anything found there is
    # most likely stale, or something is very broken—assume the former.
    # the user can also force recreation by unsetting _sev_setup_tmp.
    if [[ -h $_sev_tmp ]] {
        unlink $_sev_tmp 2>/dev/null
    } elif [[ -e $_sev_tmp ]] {
        rm -rf $_sev_tmp 2>/dev/null
    }
    mkdir -m700 $_sev_tmp 2>/dev/null
    export TMPDIR=$_sev_tmp TEMP=$_sev_tmp TMP=$_sev_tmp
    unset tmpd homed
    export _sev_setup_tmp=
}

### xdg
if [[ ! -v _sev_setup_xdg ]] {
    # merge with any existing dirs and remove duplicates using unique arrays
    # NOTE: include and then remove CONFIG_HOME and DATA_HOME to ensure they
    #       are not present in the array if it was added before we got to it
    typeset -UT XDG_CONFIG_DIRS xdg_config_dirs
    export XDG_CONFIG_HOME=~/etc
    mkdir -p $XDG_CONFIG_HOME
    xdg_config_dirs=($XDG_CONFIG_HOME ~/.config
                     {/opt,/usr/local,/usr/pkg,}/etc/xdg
                     "${XDG_CONFIG_DIRS:+${xdg_config_dirs[@]}}")
    # drop CONFIG_HOME again; -U guarantees it is element 1. stripping it from
    # the scalar with ${...#$XDG_CONFIG_HOME} would leave a dangling leading
    # ":" (an empty entry) in the colon-joined value
    xdg_config_dirs=("${(@)xdg_config_dirs[2,-1]}")
    export XDG_CONFIG_DIRS

    typeset -UT XDG_DATA_DIRS xdg_data_dirs
    export XDG_DATA_HOME=~/share
    mkdir -p $XDG_DATA_HOME
    xdg_data_dirs=($XDG_DATA_HOME ~/.local/share
                   /{opt,usr/local,usr/pkg,usr}/share
                   "${XDG_DATA_DIRS:+${xdg_data_dirs[@]}}")
    # same as above: drop DATA_HOME without leaving an empty entry behind
    xdg_data_dirs=("${(@)xdg_data_dirs[2,-1]}")
    export XDG_DATA_DIRS

    export XDG_STATE_HOME=~/var/lib
    mkdir -p $XDG_STATE_HOME

    # use our custom tmp for cache and runtime
    export XDG_CACHE_HOME=$_sev_tmp/.xdg.cache
    export XDG_RUNTIME_DIR=$_sev_tmp/.xdg.runtime
    # create xdg tmp dirs
    for x in $XDG_CACHE_HOME $XDG_RUNTIME_DIR; do
        # same as in temp creation, ensure it doesn't exist
        if [[ -h $x ]] {
            unlink $x 2>/dev/null
        } elif [[ -e $x ]] {
            rm -rf $x 2>/dev/null
        }
        # XXX: cache does not have to be 700 according to spec
        mkdir -m700 $x 2>/dev/null
    done
    unset x

    # source user dirs after other vars
    [[ -e $XDG_CONFIG_HOME/user-dirs.dirs ]] &&
        emulate sh -c "source $XDG_CONFIG_HOME/user-dirs.dirs"
    export _sev_setup_xdg=
}

### gpg + ssh + forwarding
# NOTE: while ssh manages its auth sock in its protocol when ForwardSsh is
#       enabled, GPG must be forwarded manually over Unix socket. to support
#       this, we forward the restricted gpg-agent extra socket to the remote
#       host with a RemoteForward rule in ~/.ssh/config that uses the
#       _GNUPG_SOCK_* env vars. to avoid conflicts with other ssh sessions
#       where the same user is connecting to the same host from different
#       machines, gpg in each environment should utilize its own forwarded
#       socket, rather than replace the sockets in GNUPGHOME which will be
#       overridden on the next connection. previously, you could provide a path
#       to the agent socket in GPG_AGENT_INFO, but that was deprecated in GPG
#       v2.1. instead, we must clone GNUPGHOME with links and replace the agent
#       sockets there with the forwarded one.
# NOTE: since Unix sockets are not supported under Windows, this will not work
#       under msys, cygwin, mingw, etc., but may work under wsl2.
# HACK: without SendEnv, which is disabled by default in most sshd configs,
#       there is no foolproof way to prevent race conditions via filename
#       collisions or to pass the desired forward path to the remote host
#       environment. we just have to guess the path we choose is good on the
#       destination, and assume the newest matching socket is the correct one
#       after connecting. in theory, we could occlude the ssh binary on PATH
#       with an alias or script that would allow us to communicate with the
#       remote host before opening a shell, so that we can have the host
#       communicate back to the client where it wants a socket created or ask
#       the host if the path the client wants to use is writable. however, this
#       would open up too many edge cases where it wouldn't work or be clunky
#       (e.g. asking for password twice) to make it worth it.
if [[ ! -v _sev_setup_agents ]] {
    export GNUPGHOME=~/etc/gnupg

    # decode a percent-encoded path, as printed by gpgconf for socket dirs
    # (https://stackoverflow.com/a/64312099)
    _socketpath() {
        echo ${1//(#b)%([[:xdigit:]](#c2))/${(#):-0x$match[1]}}
    }

175 | ## gpg forwarding | |
176 | if [[ ! -v _sev_gpg_forwarded && -v commands[gpg] ]] { | |
177 | export _GNUPG_SOCK_DEST_BASE=/tmp/.gpg-agent-forward | |
178 | export _GNUPG_SOCK_DEST_EXT=$(date +%s).$RANDOM | |
179 | export _GNUPG_SOCK_DEST=$_GNUPG_SOCK_DEST_BASE.$_GNUPG_SOCK_DEST_EXT | |
180 | export _sev_gpg_forward_dir=${GNUPGHOME:-~/.gnupg}/.ssh_forward | |
181 | # clean up forward dirs if its session is dead or we ask for it | |
182 | if [[ -d $_sev_gpg_forward_dir ]] { | |
183 | find $_sev_gpg_forward_dir -type d -mindepth 1 -maxdepth 1 | | |
184 | while read -r x; do | |
185 | # NOTE: the only way we can get here is if we have not been | |
186 | # forwarded before or if the user asks for it. if our own | |
187 | # pid already has a dir, it is most likely stale, or | |
188 | # something is very broken—assume the former. | |
189 | p=$(basename $x) | |
190 | if [[ -v _sev_gpg_forward_clean || $$ == $p ]] || | |
191 | ! kill -0 $p 2>/dev/null; then | |
192 | find $x -mindepth 1 -maxdepth 1 | while read -r y; do | |
193 | unlink $y | |
194 | done | |
195 | rmdir $x | |
196 | fi | |
197 | done | |
198 | unset x p y | |
199 | } | |
200 | ||
201 | # find our forwarded socket | |
202 | s=($_GNUPG_SOCK_DEST_BASE*(N=oc[1])) | |
203 | if [[ -n $s && -v SSH_CLIENT ]] { | |
204 | # create new forward dir | |
205 | export _sev_gpg_forwarded= | |
206 | mkdir -pm700 $_sev_gpg_forward_dir | |
207 | h=$_sev_gpg_forward_dir/$$ | |
208 | mkdir -pm700 $h | |
209 | # XXX: is it safe to link scdaemon socket? can its name be changed? | |
210 | for x in S.scdaemon gpg.conf gpg-agent.conf sshcontrol \ | |
211 | pubring.kbx trustdb.gpg private-keys-v1.d crls.d; do | |
212 | ln -s ${GNUPGHOME:-~/.gnupg}/$x $h | |
213 | done | |
214 | export GNUPGHOME=$h | |
215 | unset h | |
216 | for x in $(gpgconf --list-dirs | grep 'agent-.*-\?socket:'); do | |
217 | x=$(_socketpath ${x/#agent-*socket:}) | |
218 | if [[ ! -v orig ]] { | |
219 | # move forwarded socket to first valid agent socket path | |
220 | # XXX: if tmp is on different filesystem this may not work | |
221 | mv $s $x | |
222 | orig=$x | |
223 | } else { | |
224 | # make links to forwarded socket for any others | |
225 | ln -s $orig $x | |
226 | } | |
227 | done | |
228 | unset x orig | |
229 | } | |
230 | unset s | |
231 | ||
232 | # what we will forward if we start a new ssh connection | |
233 | # NOTE: do this after setting up GNUPGHOME to pick up new socket path; | |
234 | # if already connected over SSH, extra should be the remote one | |
235 | export _GNUPG_SOCK_SRC=$(_socketpath \ | |
236 | $(gpgconf --list-dirs agent-extra-socket)) | |
237 | } else { | |
238 | # required for RemoteForward to not error out if the vars are unset | |
239 | [[ ! -v _GNUPG_SOCK_SRC ]] && export _GNUPG_SOCK_SRC=/nonexistent | |
240 | [[ ! -v _GNUPG_SOCK_DEST ]] && export _GNUPG_SOCK_DEST=/nonexistent | |
241 | } | |
242 | ||
243 | ## gpg agent | |
244 | if [[ -v commands[gpg-connect-agent] ]] { | |
245 | [[ -o interactive ]] && print -nP '%F{blue}>>>%f GPG agent: %F{green}' | |
246 | gpg-connect-agent /bye >/dev/null 2>&1 | |
247 | if [[ $? -ne 0 ]] { | |
248 | [[ -o interactive ]] && | |
249 | print -P '%F{red}Error communicating with GPG agent%f' | |
250 | } elif [[ ! -v _sev_gpg_forward && ! -v GPG_TTY && | |
251 | ( -o interactive || -v DISPLAY ) ]] { | |
252 | # if we aren't forwarded, set up tty if it isn't and we're | |
253 | # in an interactive session | |
254 | export GPG_TTY=$(tty) | |
255 | export PINENTRY_USER_DATA=USE_TTY=$((!${+DISPLAY})) | |
256 | gpg-connect-agent UPDATESTARTUPTTY /bye >/dev/null 2>&1 | |
257 | [[ -o interactive ]] && | |
258 | print -P "Updated TTY%f" | |
259 | } else { | |
260 | [[ -o interactive ]] && | |
261 | print -P 'Ready%f' | |
262 | } | |
263 | } | |
264 | ||
265 | ## ssh agent | |
266 | # NOTE: preferred order of agents to check: okcagent, gnupg, openssh | |
267 | # first block takes care of okcagent and openssh, second gnupg | |
268 | [[ -o interactive ]] && print -nP '%F{blue}>>>%f SSH: %F{green}' | |
269 | if [[ ! -v SSH_AUTH_SOCK && ( -v commands[okc-ssh-agent] || | |
270 | ( -v commands[ssh-agent] && ! -v commands[gpg] ) ) ]] { | |
271 | okc=${commands[okc-ssh-agent]:+okc-} | |
272 | agentfile=~/tmp/${okc}ssh-agent-exports | |
273 | typeset sock= | |
274 | typeset -i pid= | |
275 | if [[ -f $agentfile ]] { | |
276 | IFS=$'\0' read -r sock pid <$agentfile | |
277 | } | |
278 | if [[ -S $sock && $pid > 0 ]] && kill -0 $pid; then | |
279 | [[ -o interactive ]] && print -P "Reusing agent PID $pid%f" | |
280 | export SSH_AUTH_SOCK=$sock | |
281 | export SSH_AGENT_PID=$pid | |
282 | else | |
283 | e=${okc}ssh-agent | |
284 | # TODO: ensure ssh-agent path looks legit to avoid unsafe eval? | |
285 | # XXX: doesn't appear to be any other way to handle redirection. | |
286 | # because eval needs to write to current scope environment | |
287 | # subshells can't be used to capture output and print. | |
288 | if [[ -o interactive ]] { | |
289 | eval `$e` | |
290 | print -nP '%f' | |
291 | } else { | |
292 | eval `$e` >/dev/null 2>&1 | |
293 | } | |
294 | echo -n $SSH_AUTH_SOCK$'\0'$SSH_AGENT_PID >!$agentfile | |
295 | fi | |
296 | unset okc agentfile sock pid | |
297 | } elif [[ ! -v SSH_AUTH_SOCK && -v commands[gpg] ]] { | |
298 | # since gpg agent was started above, we just have to export and notify | |
299 | if [[ -o interactive ]] { | |
300 | if [[ -v _sev_gpg_forwarded ]] { | |
301 | echo 'Remote GPG agent' | |
302 | } else { | |
303 | gpg-connect-agent /subst /serverpid \ | |
304 | '/echo GPG agent PID ${get serverpid}' /bye | |
305 | } | |
306 | print -nP '%f' | |
307 | } | |
308 | export SSH_AUTH_SOCK=$(_socketpath \ | |
309 | $(gpgconf --list-dirs agent-ssh-socket)) | |
310 | } elif [[ -v SSH_AUTH_SOCK ]] { | |
311 | [[ -o interactive ]] && print -P 'Preconfigured agent%f' | |
312 | } else { | |
313 | [[ -o interactive ]] && print -P '%F{red}No agent available%f' | |
314 | } | |
315 | ||
316 | ## cleanup | |
317 | unfunction _socketpath | |
318 | ||
319 | export _sev_setup_agents= | |
320 | } | |
321 | ||
## perl local lib
# TODO: debounce this
# NOTE: the backslash continuation is required: inside $(...) a bare newline
#       would end the command, running perl with no program (blocking on
#       stdin) and -Mlocal::lib=... as a separate command
[[ -v commands[perl] && -d $XDG_DATA_HOME/perl5/lib/perl5 ]] &&
    eval $(perl -I$XDG_DATA_HOME/perl5/lib/perl5 \
        -Mlocal::lib=$XDG_DATA_HOME/perl5 2>/dev/null)

### load site-specific
[[ -f ~/.zprofile.local ]] && source ~/.zprofile.local

# vim: et sts=4 sw=4 ts=8 tw=79