##// END OF EJS Templates
merge with brendan
Benoit Boissinot -
r3058:11e3396e merge default
parent child Browse files
Show More
@@ -0,0 +1,179 b''
1 # churn.py - create a graph showing who changed the most lines
2 #
3 # Copyright 2006 Josef "Jeff" Sipek <jeffpc@josefsipek.net>
4 #
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
7 #
8 #
9 # Aliases map file format is simple one alias per line in the following
10 # format:
11 #
12 # <alias email> <actual email>
13
14 from mercurial.demandload import *
15 from mercurial.i18n import gettext as _
16 demandload(globals(), 'time sys signal os')
17 demandload(globals(), 'mercurial:hg,mdiff,fancyopts,commands,ui,util,templater,node')
18
def __gather(ui, repo, node1, node2):
    """Return (author email, changed line count) for the node1->node2 delta."""
    def dirtywork(f, mmap1, mmap2):
        # Count added/removed lines in the diff of file f between two
        # manifests; either manifest may be None (added/removed file).
        changed = 0

        to = mmap1 and repo.file(f).read(mmap1[f]) or None
        tn = mmap2 and repo.file(f).read(mmap2[f]) or None

        for line in mdiff.unidiff(to, "", tn, "", f).split("\n"):
            if not line:
                continue        # skip EOF
            if line.startswith(" "):
                continue        # context line
            if line.startswith("--- ") or line.startswith("+++ "):
                continue        # beginning of diff
            if line.startswith("@@ "):
                continue        # hunk info line
            # anything left is a changed line
            changed += 1

        return changed

    ##

    lines = 0

    changes = repo.status(node1, node2, None, util.always)[:5]
    modified, added, removed, deleted, unknown = changes

    who = repo.changelog.read(node2)[1]
    who = templater.email(who)  # reduce the committer string to an email

    mmap1 = repo.manifest.read(repo.changelog.read(node1)[0])
    mmap2 = repo.manifest.read(repo.changelog.read(node2)[0])

    for f in modified:
        lines += dirtywork(f, mmap1, mmap2)

    for f in added:
        lines += dirtywork(f, None, mmap2)

    for f in removed:
        lines += dirtywork(f, mmap1, None)

    for f in deleted:
        lines += dirtywork(f, mmap1, mmap2)

    for f in unknown:
        lines += dirtywork(f, mmap1, mmap2)

    return (who, lines)
72
def gather_stats(ui, repo, amap, revs=None, progress=False):
    """Collect per-author changed-line counts.

    ui       - ui object used for note/progress output
    amap     - dict mapping alias emails to canonical emails
    revs     - revision numbers to examine (default: every revision)
    progress - if true, write a percentage meter to the ui

    Returns a dict mapping author email -> total changed lines.
    Merge revisions are skipped.
    """
    stats = {}

    cl = repo.changelog

    if not revs:
        revs = range(0, cl.count())

    nr_revs = len(revs)

    for cur_rev, rev in enumerate(revs):
        cur_rev += 1  # 1-based position, used only by the progress meter

        node2 = cl.node(rev)
        node1 = cl.parents(node2)[0]

        # a non-null second parent means this is a merge: ignore it
        if cl.parents(node2)[1] != node.nullid:
            ui.note(_('Revision %d is a merge, ignoring...\n') % (rev,))
            continue

        who, lines = __gather(ui, repo, node1, node2)

        # remap the owner if possible ('in' instead of deprecated has_key)
        if who in amap:
            ui.note("using '%s' alias for '%s'\n" % (amap[who], who))
            who = amap[who]

        stats[who] = stats.get(who, 0) + lines

        ui.note("rev %d: %d lines by %s\n" % (rev, lines, who))

        if progress:
            # only emit when the integer percentage actually advances
            if int(100.0*(cur_rev - 1)/nr_revs) < int(100.0*cur_rev/nr_revs):
                ui.write("%d%%.." % (int(100.0*cur_rev/nr_revs),))
                sys.stdout.flush()

    if progress:
        ui.write("done\n")
        sys.stdout.flush()

    return stats
117
118 def churn(ui, repo, **opts):
119 "Graphs the number of lines changed"
120
121 def pad(s, l):
122 if len(s) < l:
123 return s + " " * (l-len(s))
124 return s[0:l]
125
126 def graph(n, maximum, width, char):
127 n = int(n * width / float(maximum))
128
129 return char * (n)
130
131 def get_aliases(f):
132 aliases = {}
133
134 for l in f.readlines():
135 l = l.strip()
136 alias, actual = l.split(" ")
137 aliases[alias] = actual
138
139 return aliases
140
141 amap = {}
142 aliases = opts.get('aliases')
143 if aliases:
144 try:
145 f = open(aliases,"r")
146 except OSError, e:
147 print "Error: " + e
148 return
149
150 amap = get_aliases(f)
151 f.close()
152
153 revs = [int(r) for r in commands.revrange(ui, repo, opts['rev'])]
154 revs.sort()
155 stats = gather_stats(ui, repo, amap, revs, opts.get('progress'))
156
157 # make a list of tuples (name, lines) and sort it in descending order
158 ordered = stats.items()
159 ordered.sort(cmp=lambda x, y: cmp(y[1], x[1]))
160
161 maximum = ordered[0][1]
162
163 ui.note("Assuming 80 character terminal\n")
164 width = 80 - 1
165
166 for i in ordered:
167 person = i[0]
168 lines = i[1]
169 print "%s %6d %s" % (pad(person, 20), lines,
170 graph(lines, maximum, width - 20 - 1 - 6 - 2 - 2, '*'))
171
# command table: name -> (function, option list, synopsis) registered
# with mercurial's extension mechanism
cmdtable = {
    "churn": (
        churn,
        [
            ('r', 'rev', [], _('limit statistics to the specified revisions')),
            ('', 'aliases', '', _('file with email aliases')),
            ('', 'progress', None, _('show progress')),
        ],
        'hg churn [-r revision range] [-a file] [--progress]',
    ),
}
@@ -1,493 +1,508 b''
1 HGRC(5)
1 HGRC(5)
2 =======
2 =======
3 Bryan O'Sullivan <bos@serpentine.com>
3 Bryan O'Sullivan <bos@serpentine.com>
4
4
5 NAME
5 NAME
6 ----
6 ----
7 hgrc - configuration files for Mercurial
7 hgrc - configuration files for Mercurial
8
8
9 SYNOPSIS
9 SYNOPSIS
10 --------
10 --------
11
11
12 The Mercurial system uses a set of configuration files to control
12 The Mercurial system uses a set of configuration files to control
13 aspects of its behaviour.
13 aspects of its behaviour.
14
14
15 FILES
15 FILES
16 -----
16 -----
17
17
18 Mercurial reads configuration data from several files, if they exist.
18 Mercurial reads configuration data from several files, if they exist.
19 The names of these files depend on the system on which Mercurial is
19 The names of these files depend on the system on which Mercurial is
20 installed.
20 installed.
21
21
22 (Unix) <install-root>/etc/mercurial/hgrc.d/*.rc::
22 (Unix) <install-root>/etc/mercurial/hgrc.d/*.rc::
23 (Unix) <install-root>/etc/mercurial/hgrc::
23 (Unix) <install-root>/etc/mercurial/hgrc::
24 Per-installation configuration files, searched for in the
24 Per-installation configuration files, searched for in the
25 directory where Mercurial is installed. For example, if installed
25 directory where Mercurial is installed. For example, if installed
26 in /shared/tools, Mercurial will look in
26 in /shared/tools, Mercurial will look in
27 /shared/tools/etc/mercurial/hgrc. Options in these files apply to
27 /shared/tools/etc/mercurial/hgrc. Options in these files apply to
28 all Mercurial commands executed by any user in any directory.
28 all Mercurial commands executed by any user in any directory.
29
29
30 (Unix) /etc/mercurial/hgrc.d/*.rc::
30 (Unix) /etc/mercurial/hgrc.d/*.rc::
31 (Unix) /etc/mercurial/hgrc::
31 (Unix) /etc/mercurial/hgrc::
32 (Windows) C:\Mercurial\Mercurial.ini::
32 (Windows) C:\Mercurial\Mercurial.ini::
33 Per-system configuration files, for the system on which Mercurial
33 Per-system configuration files, for the system on which Mercurial
34 is running. Options in these files apply to all Mercurial
34 is running. Options in these files apply to all Mercurial
35 commands executed by any user in any directory. Options in these
35 commands executed by any user in any directory. Options in these
36 files override per-installation options.
36 files override per-installation options.
37
37
38 (Unix) $HOME/.hgrc::
38 (Unix) $HOME/.hgrc::
39 (Windows) C:\Documents and Settings\USERNAME\Mercurial.ini::
39 (Windows) C:\Documents and Settings\USERNAME\Mercurial.ini::
40 (Windows) $HOME\Mercurial.ini::
40 (Windows) $HOME\Mercurial.ini::
41 Per-user configuration file, for the user running Mercurial.
41 Per-user configuration file, for the user running Mercurial.
42 Options in this file apply to all Mercurial commands executed by
42 Options in this file apply to all Mercurial commands executed by
43 any user in any directory. Options in this file override
43 any user in any directory. Options in this file override
44 per-installation and per-system options.
44 per-installation and per-system options.
45 On Windows systems, one of these is chosen exclusively according
45 On Windows systems, one of these is chosen exclusively according
46 to the definition of the HOME environment variable.
46 to the definition of the HOME environment variable.
47
47
48 (Unix, Windows) <repo>/.hg/hgrc::
48 (Unix, Windows) <repo>/.hg/hgrc::
49 Per-repository configuration options that only apply in a
49 Per-repository configuration options that only apply in a
50 particular repository. This file is not version-controlled, and
50 particular repository. This file is not version-controlled, and
51 will not get transferred during a "clone" operation. Options in
51 will not get transferred during a "clone" operation. Options in
52 this file override options in all other configuration files.
52 this file override options in all other configuration files.
53 On Unix, this file is only read if it belongs to a trusted user
53 On Unix, this file is only read if it belongs to a trusted user
54 or to a trusted group.
54 or to a trusted group.
55
55
56 SYNTAX
56 SYNTAX
57 ------
57 ------
58
58
59 A configuration file consists of sections, led by a "[section]" header
59 A configuration file consists of sections, led by a "[section]" header
60 and followed by "name: value" entries; "name=value" is also accepted.
60 and followed by "name: value" entries; "name=value" is also accepted.
61
61
62 [spam]
62 [spam]
63 eggs=ham
63 eggs=ham
64 green=
64 green=
65 eggs
65 eggs
66
66
67 Each line contains one entry. If the lines that follow are indented,
67 Each line contains one entry. If the lines that follow are indented,
68 they are treated as continuations of that entry.
68 they are treated as continuations of that entry.
69
69
70 Leading whitespace is removed from values. Empty lines are skipped.
70 Leading whitespace is removed from values. Empty lines are skipped.
71
71
72 The optional values can contain format strings which refer to other
72 The optional values can contain format strings which refer to other
73 values in the same section, or values in a special DEFAULT section.
73 values in the same section, or values in a special DEFAULT section.
74
74
75 Lines beginning with "#" or ";" are ignored and may be used to provide
75 Lines beginning with "#" or ";" are ignored and may be used to provide
76 comments.
76 comments.
77
77
78 SECTIONS
78 SECTIONS
79 --------
79 --------
80
80
81 This section describes the different sections that may appear in a
81 This section describes the different sections that may appear in a
82 Mercurial "hgrc" file, the purpose of each section, its possible
82 Mercurial "hgrc" file, the purpose of each section, its possible
83 keys, and their possible values.
83 keys, and their possible values.
84
84
85 decode/encode::
85 decode/encode::
86 Filters for transforming files on checkout/checkin. This would
86 Filters for transforming files on checkout/checkin. This would
87 typically be used for newline processing or other
87 typically be used for newline processing or other
88 localization/canonicalization of files.
88 localization/canonicalization of files.
89
89
90 Filters consist of a filter pattern followed by a filter command.
90 Filters consist of a filter pattern followed by a filter command.
91 Filter patterns are globs by default, rooted at the repository
91 Filter patterns are globs by default, rooted at the repository
92 root. For example, to match any file ending in ".txt" in the root
92 root. For example, to match any file ending in ".txt" in the root
93 directory only, use the pattern "*.txt". To match any file ending
93 directory only, use the pattern "*.txt". To match any file ending
94 in ".c" anywhere in the repository, use the pattern "**.c".
94 in ".c" anywhere in the repository, use the pattern "**.c".
95
95
96 The filter command can start with a specifier, either "pipe:" or
96 The filter command can start with a specifier, either "pipe:" or
97 "tempfile:". If no specifier is given, "pipe:" is used by default.
97 "tempfile:". If no specifier is given, "pipe:" is used by default.
98
98
99 A "pipe:" command must accept data on stdin and return the
99 A "pipe:" command must accept data on stdin and return the
100 transformed data on stdout.
100 transformed data on stdout.
101
101
102 Pipe example:
102 Pipe example:
103
103
104 [encode]
104 [encode]
105 # uncompress gzip files on checkin to improve delta compression
105 # uncompress gzip files on checkin to improve delta compression
106 # note: not necessarily a good idea, just an example
106 # note: not necessarily a good idea, just an example
107 *.gz = pipe: gunzip
107 *.gz = pipe: gunzip
108
108
109 [decode]
109 [decode]
110 # recompress gzip files when writing them to the working dir (we
110 # recompress gzip files when writing them to the working dir (we
111 # can safely omit "pipe:", because it's the default)
111 # can safely omit "pipe:", because it's the default)
112 *.gz = gzip
112 *.gz = gzip
113
113
114 A "tempfile:" command is a template. The string INFILE is replaced
114 A "tempfile:" command is a template. The string INFILE is replaced
115 with the name of a temporary file that contains the data to be
115 with the name of a temporary file that contains the data to be
116 filtered by the command. The string OUTFILE is replaced with the
116 filtered by the command. The string OUTFILE is replaced with the
117 name of an empty temporary file, where the filtered data must be
117 name of an empty temporary file, where the filtered data must be
118 written by the command.
118 written by the command.
119
119
120 NOTE: the tempfile mechanism is recommended for Windows systems,
120 NOTE: the tempfile mechanism is recommended for Windows systems,
121 where the standard shell I/O redirection operators often have
121 where the standard shell I/O redirection operators often have
122 strange effects. In particular, if you are doing line ending
122 strange effects. In particular, if you are doing line ending
123 conversion on Windows using the popular dos2unix and unix2dos
123 conversion on Windows using the popular dos2unix and unix2dos
124 programs, you *must* use the tempfile mechanism, as using pipes will
124 programs, you *must* use the tempfile mechanism, as using pipes will
125 corrupt the contents of your files.
125 corrupt the contents of your files.
126
126
127 Tempfile example:
127 Tempfile example:
128
128
129 [encode]
129 [encode]
130 # convert files to unix line ending conventions on checkin
130 # convert files to unix line ending conventions on checkin
131 **.txt = tempfile: dos2unix -n INFILE OUTFILE
131 **.txt = tempfile: dos2unix -n INFILE OUTFILE
132
132
133 [decode]
133 [decode]
134 # convert files to windows line ending conventions when writing
134 # convert files to windows line ending conventions when writing
135 # them to the working dir
135 # them to the working dir
136 **.txt = tempfile: unix2dos -n INFILE OUTFILE
136 **.txt = tempfile: unix2dos -n INFILE OUTFILE
137
137
138 defaults::
139 Use the [defaults] section to define command defaults, i.e. the
140 default options/arguments to pass to the specified commands.
141
142 The following example makes 'hg log' run in verbose mode, and
143 'hg status' show only the modified files, by default.
144
145 [defaults]
146 log = -v
147 status = -m
148
149 The actual commands, instead of their aliases, must be used when
150 defining command defaults. The command defaults will also be
151 applied to the aliases of the commands defined.
152
138 email::
153 email::
139 Settings for extensions that send email messages.
154 Settings for extensions that send email messages.
140 from;;
155 from;;
141 Optional. Email address to use in "From" header and SMTP envelope
156 Optional. Email address to use in "From" header and SMTP envelope
142 of outgoing messages.
157 of outgoing messages.
143 to;;
158 to;;
144 Optional. Comma-separated list of recipients' email addresses.
159 Optional. Comma-separated list of recipients' email addresses.
145 cc;;
160 cc;;
146 Optional. Comma-separated list of carbon copy recipients'
161 Optional. Comma-separated list of carbon copy recipients'
147 email addresses.
162 email addresses.
148 bcc;;
163 bcc;;
149 Optional. Comma-separated list of blind carbon copy
164 Optional. Comma-separated list of blind carbon copy
150 recipients' email addresses. Cannot be set interactively.
165 recipients' email addresses. Cannot be set interactively.
151 method;;
166 method;;
152 Optional. Method to use to send email messages. If value is
167 Optional. Method to use to send email messages. If value is
153 "smtp" (default), use SMTP (see section "[smtp]" for
168 "smtp" (default), use SMTP (see section "[smtp]" for
154 configuration). Otherwise, use as name of program to run that
169 configuration). Otherwise, use as name of program to run that
155 acts like sendmail (takes "-f" option for sender, list of
170 acts like sendmail (takes "-f" option for sender, list of
156 recipients on command line, message on stdin). Normally, setting
171 recipients on command line, message on stdin). Normally, setting
157 this to "sendmail" or "/usr/sbin/sendmail" is enough to use
172 this to "sendmail" or "/usr/sbin/sendmail" is enough to use
158 sendmail to send messages.
173 sendmail to send messages.
159
174
160 Email example:
175 Email example:
161
176
162 [email]
177 [email]
163 from = Joseph User <joe.user@example.com>
178 from = Joseph User <joe.user@example.com>
164 method = /usr/sbin/sendmail
179 method = /usr/sbin/sendmail
165
180
166 extensions::
181 extensions::
167 Mercurial has an extension mechanism for adding new features. To
182 Mercurial has an extension mechanism for adding new features. To
168 enable an extension, create an entry for it in this section.
183 enable an extension, create an entry for it in this section.
169
184
170 If you know that the extension is already in Python's search path,
185 If you know that the extension is already in Python's search path,
171 you can give the name of the module, followed by "=", with nothing
186 you can give the name of the module, followed by "=", with nothing
172 after the "=".
187 after the "=".
173
188
174 Otherwise, give a name that you choose, followed by "=", followed by
189 Otherwise, give a name that you choose, followed by "=", followed by
175 the path to the ".py" file (including the file name extension) that
190 the path to the ".py" file (including the file name extension) that
176 defines the extension.
191 defines the extension.
177
192
178 Example for ~/.hgrc:
193 Example for ~/.hgrc:
179
194
180 [extensions]
195 [extensions]
181 # (the mq extension will get loaded from mercurial's path)
196 # (the mq extension will get loaded from mercurial's path)
182 hgext.mq =
197 hgext.mq =
183 # (this extension will get loaded from the file specified)
198 # (this extension will get loaded from the file specified)
184 myfeature = ~/.hgext/myfeature.py
199 myfeature = ~/.hgext/myfeature.py
185
200
186 hooks::
201 hooks::
187 Commands or Python functions that get automatically executed by
202 Commands or Python functions that get automatically executed by
188 various actions such as starting or finishing a commit. Multiple
203 various actions such as starting or finishing a commit. Multiple
189 hooks can be run for the same action by appending a suffix to the
204 hooks can be run for the same action by appending a suffix to the
190 action. Overriding a site-wide hook can be done by changing its
205 action. Overriding a site-wide hook can be done by changing its
191 value or setting it to an empty string.
206 value or setting it to an empty string.
192
207
193 Example .hg/hgrc:
208 Example .hg/hgrc:
194
209
195 [hooks]
210 [hooks]
196 # do not use the site-wide hook
211 # do not use the site-wide hook
197 incoming =
212 incoming =
198 incoming.email = /my/email/hook
213 incoming.email = /my/email/hook
199 incoming.autobuild = /my/build/hook
214 incoming.autobuild = /my/build/hook
200
215
201 Most hooks are run with environment variables set that give added
216 Most hooks are run with environment variables set that give added
202 useful information. For each hook below, the environment variables
217 useful information. For each hook below, the environment variables
203 it is passed are listed with names of the form "$HG_foo".
218 it is passed are listed with names of the form "$HG_foo".
204
219
205 changegroup;;
220 changegroup;;
206 Run after a changegroup has been added via push, pull or
221 Run after a changegroup has been added via push, pull or
207 unbundle. ID of the first new changeset is in $HG_NODE. URL from
222 unbundle. ID of the first new changeset is in $HG_NODE. URL from
208 which changes came is in $HG_URL.
223 which changes came is in $HG_URL.
209 commit;;
224 commit;;
210 Run after a changeset has been created in the local repository.
225 Run after a changeset has been created in the local repository.
211 ID of the newly created changeset is in $HG_NODE. Parent
226 ID of the newly created changeset is in $HG_NODE. Parent
212 changeset IDs are in $HG_PARENT1 and $HG_PARENT2.
227 changeset IDs are in $HG_PARENT1 and $HG_PARENT2.
213 incoming;;
228 incoming;;
214 Run after a changeset has been pulled, pushed, or unbundled into
229 Run after a changeset has been pulled, pushed, or unbundled into
215 the local repository. The ID of the newly arrived changeset is in
230 the local repository. The ID of the newly arrived changeset is in
216 $HG_NODE. URL that was the source of the changes is in $HG_URL.
231 $HG_NODE. URL that was the source of the changes is in $HG_URL.
217 outgoing;;
232 outgoing;;
218 Run after sending changes from local repository to another. ID of
233 Run after sending changes from local repository to another. ID of
219 first changeset sent is in $HG_NODE. Source of operation is in
234 first changeset sent is in $HG_NODE. Source of operation is in
220 $HG_SOURCE; see "preoutgoing" hook for description.
235 $HG_SOURCE; see "preoutgoing" hook for description.
221 prechangegroup;;
236 prechangegroup;;
222 Run before a changegroup is added via push, pull or unbundle.
237 Run before a changegroup is added via push, pull or unbundle.
223 Exit status 0 allows the changegroup to proceed. Non-zero status
238 Exit status 0 allows the changegroup to proceed. Non-zero status
224 will cause the push, pull or unbundle to fail. URL from which
239 will cause the push, pull or unbundle to fail. URL from which
225 changes will come is in $HG_URL.
240 changes will come is in $HG_URL.
226 precommit;;
241 precommit;;
227 Run before starting a local commit. Exit status 0 allows the
242 Run before starting a local commit. Exit status 0 allows the
228 commit to proceed. Non-zero status will cause the commit to fail.
243 commit to proceed. Non-zero status will cause the commit to fail.
229 Parent changeset IDs are in $HG_PARENT1 and $HG_PARENT2.
244 Parent changeset IDs are in $HG_PARENT1 and $HG_PARENT2.
230 preoutgoing;;
245 preoutgoing;;
231 Run before computing changes to send from the local repository to
246 Run before computing changes to send from the local repository to
232 another. Non-zero status will cause failure. This lets you
247 another. Non-zero status will cause failure. This lets you
233 prevent a pull over http or ssh. It also guards against local pull,
248 prevent a pull over http or ssh. It also guards against local pull,
234 push (outbound) or bundle commands, but is not effective, since you
249 push (outbound) or bundle commands, but is not effective, since you
235 can just copy the files instead. Source of operation is in
250 can just copy the files instead. Source of operation is in
236 $HG_SOURCE. If "serve", operation is happening on behalf of
251 $HG_SOURCE. If "serve", operation is happening on behalf of
237 remote ssh or http repository. If "push", "pull" or "bundle",
252 remote ssh or http repository. If "push", "pull" or "bundle",
238 operation is happening on behalf of repository on same system.
253 operation is happening on behalf of repository on same system.
239 pretag;;
254 pretag;;
240 Run before creating a tag. Exit status 0 allows the tag to be
255 Run before creating a tag. Exit status 0 allows the tag to be
241 created. Non-zero status will cause the tag to fail. ID of
256 created. Non-zero status will cause the tag to fail. ID of
242 changeset to tag is in $HG_NODE. Name of tag is in $HG_TAG. Tag
257 changeset to tag is in $HG_NODE. Name of tag is in $HG_TAG. Tag
243 is local if $HG_LOCAL=1, in repo if $HG_LOCAL=0.
258 is local if $HG_LOCAL=1, in repo if $HG_LOCAL=0.
244 pretxnchangegroup;;
259 pretxnchangegroup;;
245 Run after a changegroup has been added via push, pull or unbundle,
260 Run after a changegroup has been added via push, pull or unbundle,
246 but before the transaction has been committed. Changegroup is
261 but before the transaction has been committed. Changegroup is
247 visible to hook program. This lets you validate incoming changes
262 visible to hook program. This lets you validate incoming changes
248 before accepting them. Passed the ID of the first new changeset
263 before accepting them. Passed the ID of the first new changeset
249 in $HG_NODE. Exit status 0 allows the transaction to commit.
264 in $HG_NODE. Exit status 0 allows the transaction to commit.
250 Non-zero status will cause the transaction to be rolled back and
265 Non-zero status will cause the transaction to be rolled back and
251 the push, pull or unbundle will fail. URL that was source of
266 the push, pull or unbundle will fail. URL that was source of
252 changes is in $HG_URL.
267 changes is in $HG_URL.
253 pretxncommit;;
268 pretxncommit;;
254 Run after a changeset has been created but the transaction not yet
269 Run after a changeset has been created but the transaction not yet
255 committed. Changeset is visible to hook program. This lets you
270 committed. Changeset is visible to hook program. This lets you
256 validate commit message and changes. Exit status 0 allows the
271 validate commit message and changes. Exit status 0 allows the
257 commit to proceed. Non-zero status will cause the transaction to
272 commit to proceed. Non-zero status will cause the transaction to
258 be rolled back. ID of changeset is in $HG_NODE. Parent changeset
273 be rolled back. ID of changeset is in $HG_NODE. Parent changeset
259 IDs are in $HG_PARENT1 and $HG_PARENT2.
274 IDs are in $HG_PARENT1 and $HG_PARENT2.
260 preupdate;;
275 preupdate;;
261 Run before updating the working directory. Exit status 0 allows
276 Run before updating the working directory. Exit status 0 allows
262 the update to proceed. Non-zero status will prevent the update.
277 the update to proceed. Non-zero status will prevent the update.
263 Changeset ID of first new parent is in $HG_PARENT1. If merge, ID
278 Changeset ID of first new parent is in $HG_PARENT1. If merge, ID
264 of second new parent is in $HG_PARENT2.
279 of second new parent is in $HG_PARENT2.
265 tag;;
280 tag;;
266 Run after a tag is created. ID of tagged changeset is in
281 Run after a tag is created. ID of tagged changeset is in
267 $HG_NODE. Name of tag is in $HG_TAG. Tag is local if
282 $HG_NODE. Name of tag is in $HG_TAG. Tag is local if
268 $HG_LOCAL=1, in repo if $HG_LOCAL=0.
283 $HG_LOCAL=1, in repo if $HG_LOCAL=0.
269 update;;
284 update;;
270 Run after updating the working directory. Changeset ID of first
285 Run after updating the working directory. Changeset ID of first
271 new parent is in $HG_PARENT1. If merge, ID of second new parent
286 new parent is in $HG_PARENT1. If merge, ID of second new parent
272 is in $HG_PARENT2. If update succeeded, $HG_ERROR=0. If update
287 is in $HG_PARENT2. If update succeeded, $HG_ERROR=0. If update
273 failed (e.g. because conflicts not resolved), $HG_ERROR=1.
288 failed (e.g. because conflicts not resolved), $HG_ERROR=1.
274
289
275 Note: In earlier releases, the names of hook environment variables
290 Note: In earlier releases, the names of hook environment variables
276 did not have a "HG_" prefix. The old unprefixed names are no longer
291 did not have a "HG_" prefix. The old unprefixed names are no longer
277 provided in the environment.
292 provided in the environment.
278
293
279 The syntax for Python hooks is as follows:
294 The syntax for Python hooks is as follows:
280
295
281 hookname = python:modulename.submodule.callable
296 hookname = python:modulename.submodule.callable
282
297
283 Python hooks are run within the Mercurial process. Each hook is
298 Python hooks are run within the Mercurial process. Each hook is
284 called with at least three keyword arguments: a ui object (keyword
299 called with at least three keyword arguments: a ui object (keyword
285 "ui"), a repository object (keyword "repo"), and a "hooktype"
300 "ui"), a repository object (keyword "repo"), and a "hooktype"
286 keyword that tells what kind of hook is used. Arguments listed as
301 keyword that tells what kind of hook is used. Arguments listed as
287 environment variables above are passed as keyword arguments, with no
302 environment variables above are passed as keyword arguments, with no
288 "HG_" prefix, and names in lower case.
303 "HG_" prefix, and names in lower case.
289
304
290 A Python hook must return a "true" value to succeed. Returning a
305 A Python hook must return a "true" value to succeed. Returning a
291 "false" value or raising an exception is treated as failure of the
306 "false" value or raising an exception is treated as failure of the
292 hook.
307 hook.
293
308
294 http_proxy::
309 http_proxy::
295 Used to access web-based Mercurial repositories through a HTTP
310 Used to access web-based Mercurial repositories through a HTTP
296 proxy.
311 proxy.
297 host;;
312 host;;
298 Host name and (optional) port of the proxy server, for example
313 Host name and (optional) port of the proxy server, for example
299 "myproxy:8000".
314 "myproxy:8000".
300 no;;
315 no;;
301 Optional. Comma-separated list of host names that should bypass
316 Optional. Comma-separated list of host names that should bypass
302 the proxy.
317 the proxy.
303 passwd;;
318 passwd;;
304 Optional. Password to authenticate with at the proxy server.
319 Optional. Password to authenticate with at the proxy server.
305 user;;
320 user;;
306 Optional. User name to authenticate with at the proxy server.
321 Optional. User name to authenticate with at the proxy server.
307
322
308 smtp::
323 smtp::
309 Configuration for extensions that need to send email messages.
324 Configuration for extensions that need to send email messages.
310 host;;
325 host;;
311 Host name of mail server, e.g. "mail.example.com".
326 Host name of mail server, e.g. "mail.example.com".
312 port;;
327 port;;
313 Optional. Port to connect to on mail server. Default: 25.
328 Optional. Port to connect to on mail server. Default: 25.
314 tls;;
329 tls;;
315 Optional. Whether to connect to mail server using TLS. True or
330 Optional. Whether to connect to mail server using TLS. True or
316 False. Default: False.
331 False. Default: False.
317 username;;
332 username;;
318 Optional. User name to authenticate to SMTP server with.
333 Optional. User name to authenticate to SMTP server with.
319 If username is specified, password must also be specified.
334 If username is specified, password must also be specified.
320 Default: none.
335 Default: none.
321 password;;
336 password;;
322 Optional. Password to authenticate to SMTP server with.
337 Optional. Password to authenticate to SMTP server with.
323 If username is specified, password must also be specified.
338 If username is specified, password must also be specified.
324 Default: none.
339 Default: none.
325 local_hostname;;
340 local_hostname;;
326 Optional. It's the hostname that the sender can use to identify itself
341 Optional. It's the hostname that the sender can use to identify itself
327 to the MTA.
342 to the MTA.
328
343
329 paths::
344 paths::
330 Assigns symbolic names to repositories. The left side is the
345 Assigns symbolic names to repositories. The left side is the
331 symbolic name, and the right gives the directory or URL that is the
346 symbolic name, and the right gives the directory or URL that is the
332 location of the repository. Default paths can be declared by
347 location of the repository. Default paths can be declared by
333 setting the following entries.
348 setting the following entries.
334 default;;
349 default;;
335 Directory or URL to use when pulling if no source is specified.
350 Directory or URL to use when pulling if no source is specified.
336 Default is set to repository from which the current repository
351 Default is set to repository from which the current repository
337 was cloned.
352 was cloned.
338 default-push;;
353 default-push;;
339 Optional. Directory or URL to use when pushing if no destination
354 Optional. Directory or URL to use when pushing if no destination
340 is specified.
355 is specified.
341
356
342 server::
357 server::
343 Controls generic server settings.
358 Controls generic server settings.
344 uncompressed;;
359 uncompressed;;
345 Whether to allow clients to clone a repo using the uncompressed
360 Whether to allow clients to clone a repo using the uncompressed
346 streaming protocol. This transfers about 40% more data than a
361 streaming protocol. This transfers about 40% more data than a
347 regular clone, but uses less memory and CPU on both server and
362 regular clone, but uses less memory and CPU on both server and
348 client. Over a LAN (100Mbps or better) or a very fast WAN, an
363 client. Over a LAN (100Mbps or better) or a very fast WAN, an
349 uncompressed streaming clone is a lot faster (~10x) than a regular
364 uncompressed streaming clone is a lot faster (~10x) than a regular
350 clone. Over most WAN connections (anything slower than about
365 clone. Over most WAN connections (anything slower than about
351 6Mbps), uncompressed streaming is slower, because of the extra
366 6Mbps), uncompressed streaming is slower, because of the extra
352 data transfer overhead. Default is False.
367 data transfer overhead. Default is False.
353
368
354 trusted::
369 trusted::
355 Mercurial will only read the .hg/hgrc file from a repository if
370 Mercurial will only read the .hg/hgrc file from a repository if
356 it belongs to a trusted user or to a trusted group. This section
371 it belongs to a trusted user or to a trusted group. This section
357 specifies what users and groups are trusted. To trust everybody,
372 specifies what users and groups are trusted. To trust everybody,
358 list a user or a group with name "*".
373 list a user or a group with name "*".
359 users;;
374 users;;
360 Comma-separated list of trusted users.
375 Comma-separated list of trusted users.
361 groups;;
376 groups;;
362 Comma-separated list of trusted groups.
377 Comma-separated list of trusted groups.
363
378
364 ui::
379 ui::
365 User interface controls.
380 User interface controls.
366 debug;;
381 debug;;
367 Print debugging information. True or False. Default is False.
382 Print debugging information. True or False. Default is False.
368 editor;;
383 editor;;
369 The editor to use during a commit. Default is $EDITOR or "vi".
384 The editor to use during a commit. Default is $EDITOR or "vi".
370 ignore;;
385 ignore;;
371 A file to read per-user ignore patterns from. This file should be in
386 A file to read per-user ignore patterns from. This file should be in
372 the same format as a repository-wide .hgignore file. This option
387 the same format as a repository-wide .hgignore file. This option
373 supports hook syntax, so if you want to specify multiple ignore
388 supports hook syntax, so if you want to specify multiple ignore
374 files, you can do so by setting something like
389 files, you can do so by setting something like
375 "ignore.other = ~/.hgignore2". For details of the ignore file
390 "ignore.other = ~/.hgignore2". For details of the ignore file
376 format, see the hgignore(5) man page.
391 format, see the hgignore(5) man page.
377 interactive;;
392 interactive;;
378 Allow to prompt the user. True or False. Default is True.
393 Allow to prompt the user. True or False. Default is True.
379 logtemplate;;
394 logtemplate;;
380 Template string for commands that print changesets.
395 Template string for commands that print changesets.
381 style;;
396 style;;
382 Name of style to use for command output.
397 Name of style to use for command output.
383 merge;;
398 merge;;
384 The conflict resolution program to use during a manual merge.
399 The conflict resolution program to use during a manual merge.
385 Default is "hgmerge".
400 Default is "hgmerge".
386 quiet;;
401 quiet;;
387 Reduce the amount of output printed. True or False. Default is False.
402 Reduce the amount of output printed. True or False. Default is False.
388 remotecmd;;
403 remotecmd;;
389 remote command to use for clone/push/pull operations. Default is 'hg'.
404 remote command to use for clone/push/pull operations. Default is 'hg'.
390 ssh;;
405 ssh;;
391 command to use for SSH connections. Default is 'ssh'.
406 command to use for SSH connections. Default is 'ssh'.
392 strict;;
407 strict;;
393 Require exact command names, instead of allowing unambiguous
408 Require exact command names, instead of allowing unambiguous
394 abbreviations. True or False. Default is False.
409 abbreviations. True or False. Default is False.
395 timeout;;
410 timeout;;
396 The timeout used when a lock is held (in seconds), a negative value
411 The timeout used when a lock is held (in seconds), a negative value
397 means no timeout. Default is 600.
412 means no timeout. Default is 600.
398 username;;
413 username;;
399 The committer of a changeset created when running "commit".
414 The committer of a changeset created when running "commit".
400 Typically a person's name and email address, e.g. "Fred Widget
415 Typically a person's name and email address, e.g. "Fred Widget
401 <fred@example.com>". Default is $EMAIL or username@hostname, unless
416 <fred@example.com>". Default is $EMAIL or username@hostname, unless
402 username is set to an empty string, which enforces specifying the
417 username is set to an empty string, which enforces specifying the
403 username manually.
418 username manually.
404 verbose;;
419 verbose;;
405 Increase the amount of output printed. True or False. Default is False.
420 Increase the amount of output printed. True or False. Default is False.
406
421
407
422
408 web::
423 web::
409 Web interface configuration.
424 Web interface configuration.
410 accesslog;;
425 accesslog;;
411 Where to output the access log. Default is stdout.
426 Where to output the access log. Default is stdout.
412 address;;
427 address;;
413 Interface address to bind to. Default is all.
428 Interface address to bind to. Default is all.
414 allow_archive;;
429 allow_archive;;
415 List of archive format (bz2, gz, zip) allowed for downloading.
430 List of archive format (bz2, gz, zip) allowed for downloading.
416 Default is empty.
431 Default is empty.
417 allowbz2;;
432 allowbz2;;
418 (DEPRECATED) Whether to allow .tar.bz2 downloading of repo revisions.
433 (DEPRECATED) Whether to allow .tar.bz2 downloading of repo revisions.
419 Default is false.
434 Default is false.
420 allowgz;;
435 allowgz;;
421 (DEPRECATED) Whether to allow .tar.gz downloading of repo revisions.
436 (DEPRECATED) Whether to allow .tar.gz downloading of repo revisions.
422 Default is false.
437 Default is false.
423 allowpull;;
438 allowpull;;
424 Whether to allow pulling from the repository. Default is true.
439 Whether to allow pulling from the repository. Default is true.
425 allow_push;;
440 allow_push;;
426 Whether to allow pushing to the repository. If empty or not set,
441 Whether to allow pushing to the repository. If empty or not set,
427 push is not allowed. If the special value "*", any remote user
442 push is not allowed. If the special value "*", any remote user
428 can push, including unauthenticated users. Otherwise, the remote
443 can push, including unauthenticated users. Otherwise, the remote
429 user must have been authenticated, and the authenticated user name
444 user must have been authenticated, and the authenticated user name
430 must be present in this list (separated by whitespace or ",").
445 must be present in this list (separated by whitespace or ",").
431 The contents of the allow_push list are examined after the
446 The contents of the allow_push list are examined after the
432 deny_push list.
447 deny_push list.
433 allowzip;;
448 allowzip;;
434 (DEPRECATED) Whether to allow .zip downloading of repo revisions.
449 (DEPRECATED) Whether to allow .zip downloading of repo revisions.
435 Default is false. This feature creates temporary files.
450 Default is false. This feature creates temporary files.
436 baseurl;;
451 baseurl;;
437 Base URL to use when publishing URLs in other locations, so
452 Base URL to use when publishing URLs in other locations, so
438 third-party tools like email notification hooks can construct URLs.
453 third-party tools like email notification hooks can construct URLs.
439 Example: "http://hgserver/repos/"
454 Example: "http://hgserver/repos/"
440 contact;;
455 contact;;
441 Name or email address of the person in charge of the repository.
456 Name or email address of the person in charge of the repository.
442 Default is "unknown".
457 Default is "unknown".
443 deny_push;;
458 deny_push;;
444 Whether to deny pushing to the repository. If empty or not set,
459 Whether to deny pushing to the repository. If empty or not set,
445 push is not denied. If the special value "*", all remote users
460 push is not denied. If the special value "*", all remote users
446 are denied push. Otherwise, unauthenticated users are all denied,
461 are denied push. Otherwise, unauthenticated users are all denied,
447 and any authenticated user name present in this list (separated by
462 and any authenticated user name present in this list (separated by
448 whitespace or ",") is also denied. The contents of the deny_push
463 whitespace or ",") is also denied. The contents of the deny_push
449 list are examined before the allow_push list.
464 list are examined before the allow_push list.
450 description;;
465 description;;
451 Textual description of the repository's purpose or contents.
466 Textual description of the repository's purpose or contents.
452 Default is "unknown".
467 Default is "unknown".
453 errorlog;;
468 errorlog;;
454 Where to output the error log. Default is stderr.
469 Where to output the error log. Default is stderr.
455 ipv6;;
470 ipv6;;
456 Whether to use IPv6. Default is false.
471 Whether to use IPv6. Default is false.
457 name;;
472 name;;
458 Repository name to use in the web interface. Default is current
473 Repository name to use in the web interface. Default is current
459 working directory.
474 working directory.
460 maxchanges;;
475 maxchanges;;
461 Maximum number of changes to list on the changelog. Default is 10.
476 Maximum number of changes to list on the changelog. Default is 10.
462 maxfiles;;
477 maxfiles;;
463 Maximum number of files to list per changeset. Default is 10.
478 Maximum number of files to list per changeset. Default is 10.
464 port;;
479 port;;
465 Port to listen on. Default is 8000.
480 Port to listen on. Default is 8000.
466 push_ssl;;
481 push_ssl;;
467 Whether to require that inbound pushes be transported over SSL to
482 Whether to require that inbound pushes be transported over SSL to
468 prevent password sniffing. Default is true.
483 prevent password sniffing. Default is true.
469 stripes;;
484 stripes;;
470 How many lines a "zebra stripe" should span in multiline output.
485 How many lines a "zebra stripe" should span in multiline output.
471 Default is 1; set to 0 to disable.
486 Default is 1; set to 0 to disable.
472 style;;
487 style;;
473 Which template map style to use.
488 Which template map style to use.
474 templates;;
489 templates;;
475 Where to find the HTML templates. Default is install path.
490 Where to find the HTML templates. Default is install path.
476
491
477
492
478 AUTHOR
493 AUTHOR
479 ------
494 ------
480 Bryan O'Sullivan <bos@serpentine.com>.
495 Bryan O'Sullivan <bos@serpentine.com>.
481
496
482 Mercurial was written by Matt Mackall <mpm@selenic.com>.
497 Mercurial was written by Matt Mackall <mpm@selenic.com>.
483
498
484 SEE ALSO
499 SEE ALSO
485 --------
500 --------
486 hg(1), hgignore(5)
501 hg(1), hgignore(5)
487
502
488 COPYING
503 COPYING
489 -------
504 -------
490 This manual page is copyright 2005 Bryan O'Sullivan.
505 This manual page is copyright 2005 Bryan O'Sullivan.
491 Mercurial is copyright 2005, 2006 Matt Mackall.
506 Mercurial is copyright 2005, 2006 Matt Mackall.
492 Free use of this software is granted under the terms of the GNU General
507 Free use of this software is granted under the terms of the GNU General
493 Public License (GPL).
508 Public License (GPL).
@@ -1,236 +1,231 b''
1 # hg.py - repository classes for mercurial
1 # hg.py - repository classes for mercurial
2 #
2 #
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 #
5 #
6 # This software may be used and distributed according to the terms
6 # This software may be used and distributed according to the terms
7 # of the GNU General Public License, incorporated herein by reference.
7 # of the GNU General Public License, incorporated herein by reference.
8
8
9 from node import *
9 from node import *
10 from repo import *
10 from repo import *
11 from demandload import *
11 from demandload import *
12 from i18n import gettext as _
12 from i18n import gettext as _
13 demandload(globals(), "localrepo bundlerepo httprepo sshrepo statichttprepo")
13 demandload(globals(), "localrepo bundlerepo httprepo sshrepo statichttprepo")
14 demandload(globals(), "errno lock os shutil util merge@_merge verify@_verify")
14 demandload(globals(), "errno lock os shutil util merge@_merge verify@_verify")
15
15
16 def _local(path):
16 def _local(path):
17 return (os.path.isfile(path and util.drop_scheme('file', path)) and
17 return (os.path.isfile(path and util.drop_scheme('file', path)) and
18 bundlerepo or localrepo)
18 bundlerepo or localrepo)
19
19
20 schemes = {
20 schemes = {
21 'bundle': bundlerepo,
21 'bundle': bundlerepo,
22 'file': _local,
22 'file': _local,
23 'hg': httprepo,
23 'hg': httprepo,
24 'http': httprepo,
24 'http': httprepo,
25 'https': httprepo,
25 'https': httprepo,
26 'old-http': statichttprepo,
26 'old-http': statichttprepo,
27 'ssh': sshrepo,
27 'ssh': sshrepo,
28 'static-http': statichttprepo,
28 'static-http': statichttprepo,
29 }
29 }
30
30
31 def _lookup(path):
31 def _lookup(path):
32 scheme = 'file'
32 scheme = 'file'
33 if path:
33 if path:
34 c = path.find(':')
34 c = path.find(':')
35 if c > 0:
35 if c > 0:
36 scheme = path[:c]
36 scheme = path[:c]
37 thing = schemes.get(scheme) or schemes['file']
37 thing = schemes.get(scheme) or schemes['file']
38 try:
38 try:
39 return thing(path)
39 return thing(path)
40 except TypeError:
40 except TypeError:
41 return thing
41 return thing
42
42
43 def islocal(repo):
43 def islocal(repo):
44 '''return true if repo or path is local'''
44 '''return true if repo or path is local'''
45 if isinstance(repo, str):
45 if isinstance(repo, str):
46 try:
46 try:
47 return _lookup(repo).islocal(repo)
47 return _lookup(repo).islocal(repo)
48 except AttributeError:
48 except AttributeError:
49 return False
49 return False
50 return repo.local()
50 return repo.local()
51
51
52 repo_setup_hooks = []
52 repo_setup_hooks = []
53
53
54 def repository(ui, path=None, create=False):
54 def repository(ui, path=None, create=False):
55 """return a repository object for the specified path"""
55 """return a repository object for the specified path"""
56 repo = _lookup(path).instance(ui, path, create)
56 repo = _lookup(path).instance(ui, path, create)
57 for hook in repo_setup_hooks:
57 for hook in repo_setup_hooks:
58 hook(ui, repo)
58 hook(ui, repo)
59 return repo
59 return repo
60
60
61 def defaultdest(source):
61 def defaultdest(source):
62 '''return default destination of clone if none is given'''
62 '''return default destination of clone if none is given'''
63 return os.path.basename(os.path.normpath(source))
63 return os.path.basename(os.path.normpath(source))
64
64
65 def clone(ui, source, dest=None, pull=False, rev=None, update=True,
65 def clone(ui, source, dest=None, pull=False, rev=None, update=True,
66 stream=False):
66 stream=False):
67 """Make a copy of an existing repository.
67 """Make a copy of an existing repository.
68
68
69 Create a copy of an existing repository in a new directory. The
69 Create a copy of an existing repository in a new directory. The
70 source and destination are URLs, as passed to the repository
70 source and destination are URLs, as passed to the repository
71 function. Returns a pair of repository objects, the source and
71 function. Returns a pair of repository objects, the source and
72 newly created destination.
72 newly created destination.
73
73
74 The location of the source is added to the new repository's
74 The location of the source is added to the new repository's
75 .hg/hgrc file, as the default to be used for future pulls and
75 .hg/hgrc file, as the default to be used for future pulls and
76 pushes.
76 pushes.
77
77
78 If an exception is raised, the partly cloned/updated destination
78 If an exception is raised, the partly cloned/updated destination
79 repository will be deleted.
79 repository will be deleted.
80
80
81 Arguments:
81 Arguments:
82
82
83 source: repository object or URL
83 source: repository object or URL
84
84
85 dest: URL of destination repository to create (defaults to base
85 dest: URL of destination repository to create (defaults to base
86 name of source repository)
86 name of source repository)
87
87
88 pull: always pull from source repository, even in local case
88 pull: always pull from source repository, even in local case
89
89
90 stream: stream raw data uncompressed from repository (fast over
90 stream: stream raw data uncompressed from repository (fast over
91 LAN, slow over WAN)
91 LAN, slow over WAN)
92
92
93 rev: revision to clone up to (implies pull=True)
93 rev: revision to clone up to (implies pull=True)
94
94
95 update: update working directory after clone completes, if
95 update: update working directory after clone completes, if
96 destination is local repository
96 destination is local repository
97 """
97 """
98 if isinstance(source, str):
98 if isinstance(source, str):
99 src_repo = repository(ui, source)
99 src_repo = repository(ui, source)
100 else:
100 else:
101 src_repo = source
101 src_repo = source
102 source = src_repo.url()
102 source = src_repo.url()
103
103
104 if dest is None:
104 if dest is None:
105 dest = defaultdest(source)
105 dest = defaultdest(source)
106
106
107 def localpath(path):
107 def localpath(path):
108 if path.startswith('file://'):
108 if path.startswith('file://'):
109 return path[7:]
109 return path[7:]
110 if path.startswith('file:'):
110 if path.startswith('file:'):
111 return path[5:]
111 return path[5:]
112 return path
112 return path
113
113
114 dest = localpath(dest)
114 dest = localpath(dest)
115 source = localpath(source)
115 source = localpath(source)
116
116
117 if os.path.exists(dest):
117 if os.path.exists(dest):
118 raise util.Abort(_("destination '%s' already exists"), dest)
118 raise util.Abort(_("destination '%s' already exists"), dest)
119
119
120 class DirCleanup(object):
120 class DirCleanup(object):
121 def __init__(self, dir_):
121 def __init__(self, dir_):
122 self.rmtree = shutil.rmtree
122 self.rmtree = shutil.rmtree
123 self.dir_ = dir_
123 self.dir_ = dir_
124 def close(self):
124 def close(self):
125 self.dir_ = None
125 self.dir_ = None
126 def __del__(self):
126 def __del__(self):
127 if self.dir_:
127 if self.dir_:
128 self.rmtree(self.dir_, True)
128 self.rmtree(self.dir_, True)
129
129
130 dest_repo = None
130 dest_repo = repository(ui, dest, create=True)
131 try:
132 dest_repo = repository(ui, dest)
133 raise util.Abort(_("destination '%s' already exists." % dest))
134 except RepoError:
135 dest_repo = repository(ui, dest, create=True)
136
131
137 dest_path = None
132 dest_path = None
138 dir_cleanup = None
133 dir_cleanup = None
139 if dest_repo.local():
134 if dest_repo.local():
140 dest_path = os.path.realpath(dest_repo.root)
135 dest_path = os.path.realpath(dest_repo.root)
141 dir_cleanup = DirCleanup(dest_path)
136 dir_cleanup = DirCleanup(dest_path)
142
137
143 abspath = source
138 abspath = source
144 copy = False
139 copy = False
145 if src_repo.local() and dest_repo.local():
140 if src_repo.local() and dest_repo.local():
146 abspath = os.path.abspath(source)
141 abspath = os.path.abspath(source)
147 copy = not pull and not rev
142 copy = not pull and not rev
148
143
149 src_lock, dest_lock = None, None
144 src_lock, dest_lock = None, None
150 if copy:
145 if copy:
151 try:
146 try:
152 # we use a lock here because if we race with commit, we
147 # we use a lock here because if we race with commit, we
153 # can end up with extra data in the cloned revlogs that's
148 # can end up with extra data in the cloned revlogs that's
154 # not pointed to by changesets, thus causing verify to
149 # not pointed to by changesets, thus causing verify to
155 # fail
150 # fail
156 src_lock = src_repo.lock()
151 src_lock = src_repo.lock()
157 except lock.LockException:
152 except lock.LockException:
158 copy = False
153 copy = False
159
154
160 if copy:
155 if copy:
161 # we lock here to avoid premature writing to the target
156 # we lock here to avoid premature writing to the target
162 dest_lock = lock.lock(os.path.join(dest_path, ".hg", "lock"))
157 dest_lock = lock.lock(os.path.join(dest_path, ".hg", "lock"))
163
158
164 # we need to remove the (empty) data dir in dest so copyfiles
159 # we need to remove the (empty) data dir in dest so copyfiles
165 # can do its work
160 # can do its work
166 os.rmdir(os.path.join(dest_path, ".hg", "data"))
161 os.rmdir(os.path.join(dest_path, ".hg", "data"))
167 files = "data 00manifest.d 00manifest.i 00changelog.d 00changelog.i"
162 files = "data 00manifest.d 00manifest.i 00changelog.d 00changelog.i"
168 for f in files.split():
163 for f in files.split():
169 src = os.path.join(source, ".hg", f)
164 src = os.path.join(source, ".hg", f)
170 dst = os.path.join(dest_path, ".hg", f)
165 dst = os.path.join(dest_path, ".hg", f)
171 try:
166 try:
172 util.copyfiles(src, dst)
167 util.copyfiles(src, dst)
173 except OSError, inst:
168 except OSError, inst:
174 if inst.errno != errno.ENOENT:
169 if inst.errno != errno.ENOENT:
175 raise
170 raise
176
171
177 # we need to re-init the repo after manually copying the data
172 # we need to re-init the repo after manually copying the data
178 # into it
173 # into it
179 dest_repo = repository(ui, dest)
174 dest_repo = repository(ui, dest)
180
175
181 else:
176 else:
182 revs = None
177 revs = None
183 if rev:
178 if rev:
184 if not src_repo.local():
179 if not src_repo.local():
185 raise util.Abort(_("clone by revision not supported yet "
180 raise util.Abort(_("clone by revision not supported yet "
186 "for remote repositories"))
181 "for remote repositories"))
187 revs = [src_repo.lookup(r) for r in rev]
182 revs = [src_repo.lookup(r) for r in rev]
188
183
189 if dest_repo.local():
184 if dest_repo.local():
190 dest_repo.clone(src_repo, heads=revs, stream=stream)
185 dest_repo.clone(src_repo, heads=revs, stream=stream)
191 elif src_repo.local():
186 elif src_repo.local():
192 src_repo.push(dest_repo, revs=revs)
187 src_repo.push(dest_repo, revs=revs)
193 else:
188 else:
194 raise util.Abort(_("clone from remote to remote not supported"))
189 raise util.Abort(_("clone from remote to remote not supported"))
195
190
196 if src_lock:
191 if src_lock:
197 src_lock.release()
192 src_lock.release()
198
193
199 if dest_repo.local():
194 if dest_repo.local():
200 fp = dest_repo.opener("hgrc", "w", text=True)
195 fp = dest_repo.opener("hgrc", "w", text=True)
201 fp.write("[paths]\n")
196 fp.write("[paths]\n")
202 fp.write("default = %s\n" % abspath)
197 fp.write("default = %s\n" % abspath)
203 fp.close()
198 fp.close()
204
199
205 if dest_lock:
200 if dest_lock:
206 dest_lock.release()
201 dest_lock.release()
207
202
208 if update:
203 if update:
209 _merge.update(dest_repo, dest_repo.changelog.tip())
204 _merge.update(dest_repo, dest_repo.changelog.tip())
210 if dir_cleanup:
205 if dir_cleanup:
211 dir_cleanup.close()
206 dir_cleanup.close()
212
207
213 return src_repo, dest_repo
208 return src_repo, dest_repo
214
209
215 def update(repo, node):
210 def update(repo, node):
216 """update the working directory to node, merging linear changes"""
211 """update the working directory to node, merging linear changes"""
217 return _merge.update(repo, node)
212 return _merge.update(repo, node)
218
213
219 def clean(repo, node, wlock=None, show_stats=True):
214 def clean(repo, node, wlock=None, show_stats=True):
220 """forcibly switch the working directory to node, clobbering changes"""
215 """forcibly switch the working directory to node, clobbering changes"""
221 return _merge.update(repo, node, force=True, wlock=wlock,
216 return _merge.update(repo, node, force=True, wlock=wlock,
222 show_stats=show_stats)
217 show_stats=show_stats)
223
218
224 def merge(repo, node, force=None, remind=True, wlock=None):
219 def merge(repo, node, force=None, remind=True, wlock=None):
225 """branch merge with node, resolving changes"""
220 """branch merge with node, resolving changes"""
226 return _merge.update(repo, node, branchmerge=True, force=force,
221 return _merge.update(repo, node, branchmerge=True, force=force,
227 remind=remind, wlock=wlock)
222 remind=remind, wlock=wlock)
228
223
229 def revert(repo, node, choose, wlock):
224 def revert(repo, node, choose, wlock):
230 """revert changes to revision in node without updating dirstate"""
225 """revert changes to revision in node without updating dirstate"""
231 return _merge.update(repo, node, force=True, partial=choose,
226 return _merge.update(repo, node, force=True, partial=choose,
232 show_stats=False, wlock=wlock)
227 show_stats=False, wlock=wlock)
233
228
234 def verify(repo):
229 def verify(repo):
235 """verify the consistency of a repository"""
230 """verify the consistency of a repository"""
236 return _verify.verify(repo)
231 return _verify.verify(repo)
@@ -1,1749 +1,1751 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7
7
8 from node import *
8 from node import *
9 from i18n import gettext as _
9 from i18n import gettext as _
10 from demandload import *
10 from demandload import *
11 import repo
11 import repo
12 demandload(globals(), "appendfile changegroup")
12 demandload(globals(), "appendfile changegroup")
13 demandload(globals(), "changelog dirstate filelog manifest context")
13 demandload(globals(), "changelog dirstate filelog manifest context")
14 demandload(globals(), "re lock transaction tempfile stat mdiff errno ui")
14 demandload(globals(), "re lock transaction tempfile stat mdiff errno ui")
15 demandload(globals(), "os revlog time util")
15 demandload(globals(), "os revlog time util")
16
16
17 class localrepository(repo.repository):
17 class localrepository(repo.repository):
18 capabilities = ()
18 capabilities = ()
19
19
20 def __del__(self):
20 def __del__(self):
21 self.transhandle = None
21 self.transhandle = None
22 def __init__(self, parentui, path=None, create=0):
22 def __init__(self, parentui, path=None, create=0):
23 repo.repository.__init__(self)
23 repo.repository.__init__(self)
24 if not path:
24 if not path:
25 p = os.getcwd()
25 p = os.getcwd()
26 while not os.path.isdir(os.path.join(p, ".hg")):
26 while not os.path.isdir(os.path.join(p, ".hg")):
27 oldp = p
27 oldp = p
28 p = os.path.dirname(p)
28 p = os.path.dirname(p)
29 if p == oldp:
29 if p == oldp:
30 raise repo.RepoError(_("no repo found"))
30 raise repo.RepoError(_("no repo found"))
31 path = p
31 path = p
32 self.path = os.path.join(path, ".hg")
32 self.path = os.path.join(path, ".hg")
33
33
34 if not create and not os.path.isdir(self.path):
34 if not os.path.isdir(self.path):
35 raise repo.RepoError(_("repository %s not found") % path)
35 if create:
36 if not os.path.exists(path):
37 os.mkdir(path)
38 os.mkdir(self.path)
39 os.mkdir(self.join("data"))
40 else:
41 raise repo.RepoError(_("repository %s not found") % path)
42 elif create:
43 raise repo.RepoError(_("repository %s already exists") % path)
36
44
37 self.root = os.path.abspath(path)
45 self.root = os.path.abspath(path)
38 self.origroot = path
46 self.origroot = path
39 self.ui = ui.ui(parentui=parentui)
47 self.ui = ui.ui(parentui=parentui)
40 self.opener = util.opener(self.path)
48 self.opener = util.opener(self.path)
41 self.wopener = util.opener(self.root)
49 self.wopener = util.opener(self.root)
42
50
43 try:
51 try:
44 self.ui.readconfig(self.join("hgrc"), self.root)
52 self.ui.readconfig(self.join("hgrc"), self.root)
45 except IOError:
53 except IOError:
46 pass
54 pass
47
55
48 v = self.ui.revlogopts
56 v = self.ui.revlogopts
49 self.revlogversion = int(v.get('format', revlog.REVLOG_DEFAULT_FORMAT))
57 self.revlogversion = int(v.get('format', revlog.REVLOG_DEFAULT_FORMAT))
50 self.revlogv1 = self.revlogversion != revlog.REVLOGV0
58 self.revlogv1 = self.revlogversion != revlog.REVLOGV0
51 fl = v.get('flags', None)
59 fl = v.get('flags', None)
52 flags = 0
60 flags = 0
53 if fl != None:
61 if fl != None:
54 for x in fl.split():
62 for x in fl.split():
55 flags |= revlog.flagstr(x)
63 flags |= revlog.flagstr(x)
56 elif self.revlogv1:
64 elif self.revlogv1:
57 flags = revlog.REVLOG_DEFAULT_FLAGS
65 flags = revlog.REVLOG_DEFAULT_FLAGS
58
66
59 v = self.revlogversion | flags
67 v = self.revlogversion | flags
60 self.manifest = manifest.manifest(self.opener, v)
68 self.manifest = manifest.manifest(self.opener, v)
61 self.changelog = changelog.changelog(self.opener, v)
69 self.changelog = changelog.changelog(self.opener, v)
62
70
63 # the changelog might not have the inline index flag
71 # the changelog might not have the inline index flag
64 # on. If the format of the changelog is the same as found in
72 # on. If the format of the changelog is the same as found in
65 # .hgrc, apply any flags found in the .hgrc as well.
73 # .hgrc, apply any flags found in the .hgrc as well.
66 # Otherwise, just version from the changelog
74 # Otherwise, just version from the changelog
67 v = self.changelog.version
75 v = self.changelog.version
68 if v == self.revlogversion:
76 if v == self.revlogversion:
69 v |= flags
77 v |= flags
70 self.revlogversion = v
78 self.revlogversion = v
71
79
72 self.tagscache = None
80 self.tagscache = None
73 self.nodetagscache = None
81 self.nodetagscache = None
74 self.encodepats = None
82 self.encodepats = None
75 self.decodepats = None
83 self.decodepats = None
76 self.transhandle = None
84 self.transhandle = None
77
85
78 if create:
79 if not os.path.exists(path):
80 os.mkdir(path)
81 os.mkdir(self.path)
82 os.mkdir(self.join("data"))
83
84 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
86 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
85
87
86 def url(self):
88 def url(self):
87 return 'file:' + self.root
89 return 'file:' + self.root
88
90
89 def hook(self, name, throw=False, **args):
91 def hook(self, name, throw=False, **args):
90 def callhook(hname, funcname):
92 def callhook(hname, funcname):
91 '''call python hook. hook is callable object, looked up as
93 '''call python hook. hook is callable object, looked up as
92 name in python module. if callable returns "true", hook
94 name in python module. if callable returns "true", hook
93 fails, else passes. if hook raises exception, treated as
95 fails, else passes. if hook raises exception, treated as
94 hook failure. exception propagates if throw is "true".
96 hook failure. exception propagates if throw is "true".
95
97
96 reason for "true" meaning "hook failed" is so that
98 reason for "true" meaning "hook failed" is so that
97 unmodified commands (e.g. mercurial.commands.update) can
99 unmodified commands (e.g. mercurial.commands.update) can
98 be run as hooks without wrappers to convert return values.'''
100 be run as hooks without wrappers to convert return values.'''
99
101
100 self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
102 self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
101 d = funcname.rfind('.')
103 d = funcname.rfind('.')
102 if d == -1:
104 if d == -1:
103 raise util.Abort(_('%s hook is invalid ("%s" not in a module)')
105 raise util.Abort(_('%s hook is invalid ("%s" not in a module)')
104 % (hname, funcname))
106 % (hname, funcname))
105 modname = funcname[:d]
107 modname = funcname[:d]
106 try:
108 try:
107 obj = __import__(modname)
109 obj = __import__(modname)
108 except ImportError:
110 except ImportError:
109 try:
111 try:
110 # extensions are loaded with hgext_ prefix
112 # extensions are loaded with hgext_ prefix
111 obj = __import__("hgext_%s" % modname)
113 obj = __import__("hgext_%s" % modname)
112 except ImportError:
114 except ImportError:
113 raise util.Abort(_('%s hook is invalid '
115 raise util.Abort(_('%s hook is invalid '
114 '(import of "%s" failed)') %
116 '(import of "%s" failed)') %
115 (hname, modname))
117 (hname, modname))
116 try:
118 try:
117 for p in funcname.split('.')[1:]:
119 for p in funcname.split('.')[1:]:
118 obj = getattr(obj, p)
120 obj = getattr(obj, p)
119 except AttributeError, err:
121 except AttributeError, err:
120 raise util.Abort(_('%s hook is invalid '
122 raise util.Abort(_('%s hook is invalid '
121 '("%s" is not defined)') %
123 '("%s" is not defined)') %
122 (hname, funcname))
124 (hname, funcname))
123 if not callable(obj):
125 if not callable(obj):
124 raise util.Abort(_('%s hook is invalid '
126 raise util.Abort(_('%s hook is invalid '
125 '("%s" is not callable)') %
127 '("%s" is not callable)') %
126 (hname, funcname))
128 (hname, funcname))
127 try:
129 try:
128 r = obj(ui=self.ui, repo=self, hooktype=name, **args)
130 r = obj(ui=self.ui, repo=self, hooktype=name, **args)
129 except (KeyboardInterrupt, util.SignalInterrupt):
131 except (KeyboardInterrupt, util.SignalInterrupt):
130 raise
132 raise
131 except Exception, exc:
133 except Exception, exc:
132 if isinstance(exc, util.Abort):
134 if isinstance(exc, util.Abort):
133 self.ui.warn(_('error: %s hook failed: %s\n') %
135 self.ui.warn(_('error: %s hook failed: %s\n') %
134 (hname, exc.args[0] % exc.args[1:]))
136 (hname, exc.args[0] % exc.args[1:]))
135 else:
137 else:
136 self.ui.warn(_('error: %s hook raised an exception: '
138 self.ui.warn(_('error: %s hook raised an exception: '
137 '%s\n') % (hname, exc))
139 '%s\n') % (hname, exc))
138 if throw:
140 if throw:
139 raise
141 raise
140 self.ui.print_exc()
142 self.ui.print_exc()
141 return True
143 return True
142 if r:
144 if r:
143 if throw:
145 if throw:
144 raise util.Abort(_('%s hook failed') % hname)
146 raise util.Abort(_('%s hook failed') % hname)
145 self.ui.warn(_('warning: %s hook failed\n') % hname)
147 self.ui.warn(_('warning: %s hook failed\n') % hname)
146 return r
148 return r
147
149
148 def runhook(name, cmd):
150 def runhook(name, cmd):
149 self.ui.note(_("running hook %s: %s\n") % (name, cmd))
151 self.ui.note(_("running hook %s: %s\n") % (name, cmd))
150 env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
152 env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
151 r = util.system(cmd, environ=env, cwd=self.root)
153 r = util.system(cmd, environ=env, cwd=self.root)
152 if r:
154 if r:
153 desc, r = util.explain_exit(r)
155 desc, r = util.explain_exit(r)
154 if throw:
156 if throw:
155 raise util.Abort(_('%s hook %s') % (name, desc))
157 raise util.Abort(_('%s hook %s') % (name, desc))
156 self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
158 self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
157 return r
159 return r
158
160
159 r = False
161 r = False
160 hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
162 hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
161 if hname.split(".", 1)[0] == name and cmd]
163 if hname.split(".", 1)[0] == name and cmd]
162 hooks.sort()
164 hooks.sort()
163 for hname, cmd in hooks:
165 for hname, cmd in hooks:
164 if cmd.startswith('python:'):
166 if cmd.startswith('python:'):
165 r = callhook(hname, cmd[7:].strip()) or r
167 r = callhook(hname, cmd[7:].strip()) or r
166 else:
168 else:
167 r = runhook(hname, cmd) or r
169 r = runhook(hname, cmd) or r
168 return r
170 return r
169
171
170 tag_disallowed = ':\r\n'
172 tag_disallowed = ':\r\n'
171
173
172 def tag(self, name, node, message, local, user, date):
174 def tag(self, name, node, message, local, user, date):
173 '''tag a revision with a symbolic name.
175 '''tag a revision with a symbolic name.
174
176
175 if local is True, the tag is stored in a per-repository file.
177 if local is True, the tag is stored in a per-repository file.
176 otherwise, it is stored in the .hgtags file, and a new
178 otherwise, it is stored in the .hgtags file, and a new
177 changeset is committed with the change.
179 changeset is committed with the change.
178
180
179 keyword arguments:
181 keyword arguments:
180
182
181 local: whether to store tag in non-version-controlled file
183 local: whether to store tag in non-version-controlled file
182 (default False)
184 (default False)
183
185
184 message: commit message to use if committing
186 message: commit message to use if committing
185
187
186 user: name of user to use if committing
188 user: name of user to use if committing
187
189
188 date: date tuple to use if committing'''
190 date: date tuple to use if committing'''
189
191
190 for c in self.tag_disallowed:
192 for c in self.tag_disallowed:
191 if c in name:
193 if c in name:
192 raise util.Abort(_('%r cannot be used in a tag name') % c)
194 raise util.Abort(_('%r cannot be used in a tag name') % c)
193
195
194 self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)
196 self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)
195
197
196 if local:
198 if local:
197 self.opener('localtags', 'a').write('%s %s\n' % (hex(node), name))
199 self.opener('localtags', 'a').write('%s %s\n' % (hex(node), name))
198 self.hook('tag', node=hex(node), tag=name, local=local)
200 self.hook('tag', node=hex(node), tag=name, local=local)
199 return
201 return
200
202
201 for x in self.status()[:5]:
203 for x in self.status()[:5]:
202 if '.hgtags' in x:
204 if '.hgtags' in x:
203 raise util.Abort(_('working copy of .hgtags is changed '
205 raise util.Abort(_('working copy of .hgtags is changed '
204 '(please commit .hgtags manually)'))
206 '(please commit .hgtags manually)'))
205
207
206 self.wfile('.hgtags', 'ab').write('%s %s\n' % (hex(node), name))
208 self.wfile('.hgtags', 'ab').write('%s %s\n' % (hex(node), name))
207 if self.dirstate.state('.hgtags') == '?':
209 if self.dirstate.state('.hgtags') == '?':
208 self.add(['.hgtags'])
210 self.add(['.hgtags'])
209
211
210 self.commit(['.hgtags'], message, user, date)
212 self.commit(['.hgtags'], message, user, date)
211 self.hook('tag', node=hex(node), tag=name, local=local)
213 self.hook('tag', node=hex(node), tag=name, local=local)
212
214
213 def tags(self):
215 def tags(self):
214 '''return a mapping of tag to node'''
216 '''return a mapping of tag to node'''
215 if not self.tagscache:
217 if not self.tagscache:
216 self.tagscache = {}
218 self.tagscache = {}
217
219
218 def parsetag(line, context):
220 def parsetag(line, context):
219 if not line:
221 if not line:
220 return
222 return
221 s = l.split(" ", 1)
223 s = l.split(" ", 1)
222 if len(s) != 2:
224 if len(s) != 2:
223 self.ui.warn(_("%s: cannot parse entry\n") % context)
225 self.ui.warn(_("%s: cannot parse entry\n") % context)
224 return
226 return
225 node, key = s
227 node, key = s
226 key = key.strip()
228 key = key.strip()
227 try:
229 try:
228 bin_n = bin(node)
230 bin_n = bin(node)
229 except TypeError:
231 except TypeError:
230 self.ui.warn(_("%s: node '%s' is not well formed\n") %
232 self.ui.warn(_("%s: node '%s' is not well formed\n") %
231 (context, node))
233 (context, node))
232 return
234 return
233 if bin_n not in self.changelog.nodemap:
235 if bin_n not in self.changelog.nodemap:
234 self.ui.warn(_("%s: tag '%s' refers to unknown node\n") %
236 self.ui.warn(_("%s: tag '%s' refers to unknown node\n") %
235 (context, key))
237 (context, key))
236 return
238 return
237 self.tagscache[key] = bin_n
239 self.tagscache[key] = bin_n
238
240
239 # read the tags file from each head, ending with the tip,
241 # read the tags file from each head, ending with the tip,
240 # and add each tag found to the map, with "newer" ones
242 # and add each tag found to the map, with "newer" ones
241 # taking precedence
243 # taking precedence
242 heads = self.heads()
244 heads = self.heads()
243 heads.reverse()
245 heads.reverse()
244 fl = self.file(".hgtags")
246 fl = self.file(".hgtags")
245 for node in heads:
247 for node in heads:
246 change = self.changelog.read(node)
248 change = self.changelog.read(node)
247 rev = self.changelog.rev(node)
249 rev = self.changelog.rev(node)
248 fn, ff = self.manifest.find(change[0], '.hgtags')
250 fn, ff = self.manifest.find(change[0], '.hgtags')
249 if fn is None: continue
251 if fn is None: continue
250 count = 0
252 count = 0
251 for l in fl.read(fn).splitlines():
253 for l in fl.read(fn).splitlines():
252 count += 1
254 count += 1
253 parsetag(l, _(".hgtags (rev %d:%s), line %d") %
255 parsetag(l, _(".hgtags (rev %d:%s), line %d") %
254 (rev, short(node), count))
256 (rev, short(node), count))
255 try:
257 try:
256 f = self.opener("localtags")
258 f = self.opener("localtags")
257 count = 0
259 count = 0
258 for l in f:
260 for l in f:
259 count += 1
261 count += 1
260 parsetag(l, _("localtags, line %d") % count)
262 parsetag(l, _("localtags, line %d") % count)
261 except IOError:
263 except IOError:
262 pass
264 pass
263
265
264 self.tagscache['tip'] = self.changelog.tip()
266 self.tagscache['tip'] = self.changelog.tip()
265
267
266 return self.tagscache
268 return self.tagscache
267
269
268 def tagslist(self):
270 def tagslist(self):
269 '''return a list of tags ordered by revision'''
271 '''return a list of tags ordered by revision'''
270 l = []
272 l = []
271 for t, n in self.tags().items():
273 for t, n in self.tags().items():
272 try:
274 try:
273 r = self.changelog.rev(n)
275 r = self.changelog.rev(n)
274 except:
276 except:
275 r = -2 # sort to the beginning of the list if unknown
277 r = -2 # sort to the beginning of the list if unknown
276 l.append((r, t, n))
278 l.append((r, t, n))
277 l.sort()
279 l.sort()
278 return [(t, n) for r, t, n in l]
280 return [(t, n) for r, t, n in l]
279
281
280 def nodetags(self, node):
282 def nodetags(self, node):
281 '''return the tags associated with a node'''
283 '''return the tags associated with a node'''
282 if not self.nodetagscache:
284 if not self.nodetagscache:
283 self.nodetagscache = {}
285 self.nodetagscache = {}
284 for t, n in self.tags().items():
286 for t, n in self.tags().items():
285 self.nodetagscache.setdefault(n, []).append(t)
287 self.nodetagscache.setdefault(n, []).append(t)
286 return self.nodetagscache.get(node, [])
288 return self.nodetagscache.get(node, [])
287
289
288 def lookup(self, key):
290 def lookup(self, key):
289 try:
291 try:
290 return self.tags()[key]
292 return self.tags()[key]
291 except KeyError:
293 except KeyError:
292 if key == '.':
294 if key == '.':
293 key = self.dirstate.parents()[0]
295 key = self.dirstate.parents()[0]
294 if key == nullid:
296 if key == nullid:
295 raise repo.RepoError(_("no revision checked out"))
297 raise repo.RepoError(_("no revision checked out"))
296 try:
298 try:
297 return self.changelog.lookup(key)
299 return self.changelog.lookup(key)
298 except:
300 except:
299 raise repo.RepoError(_("unknown revision '%s'") % key)
301 raise repo.RepoError(_("unknown revision '%s'") % key)
300
302
301 def dev(self):
303 def dev(self):
302 return os.lstat(self.path).st_dev
304 return os.lstat(self.path).st_dev
303
305
304 def local(self):
306 def local(self):
305 return True
307 return True
306
308
307 def join(self, f):
309 def join(self, f):
308 return os.path.join(self.path, f)
310 return os.path.join(self.path, f)
309
311
310 def wjoin(self, f):
312 def wjoin(self, f):
311 return os.path.join(self.root, f)
313 return os.path.join(self.root, f)
312
314
313 def file(self, f):
315 def file(self, f):
314 if f[0] == '/':
316 if f[0] == '/':
315 f = f[1:]
317 f = f[1:]
316 return filelog.filelog(self.opener, f, self.revlogversion)
318 return filelog.filelog(self.opener, f, self.revlogversion)
317
319
318 def changectx(self, changeid):
320 def changectx(self, changeid):
319 return context.changectx(self, changeid)
321 return context.changectx(self, changeid)
320
322
321 def filectx(self, path, changeid=None, fileid=None):
323 def filectx(self, path, changeid=None, fileid=None):
322 """changeid can be a changeset revision, node, or tag.
324 """changeid can be a changeset revision, node, or tag.
323 fileid can be a file revision or node."""
325 fileid can be a file revision or node."""
324 return context.filectx(self, path, changeid, fileid)
326 return context.filectx(self, path, changeid, fileid)
325
327
326 def getcwd(self):
328 def getcwd(self):
327 return self.dirstate.getcwd()
329 return self.dirstate.getcwd()
328
330
329 def wfile(self, f, mode='r'):
331 def wfile(self, f, mode='r'):
330 return self.wopener(f, mode)
332 return self.wopener(f, mode)
331
333
332 def wread(self, filename):
334 def wread(self, filename):
333 if self.encodepats == None:
335 if self.encodepats == None:
334 l = []
336 l = []
335 for pat, cmd in self.ui.configitems("encode"):
337 for pat, cmd in self.ui.configitems("encode"):
336 mf = util.matcher(self.root, "", [pat], [], [])[1]
338 mf = util.matcher(self.root, "", [pat], [], [])[1]
337 l.append((mf, cmd))
339 l.append((mf, cmd))
338 self.encodepats = l
340 self.encodepats = l
339
341
340 data = self.wopener(filename, 'r').read()
342 data = self.wopener(filename, 'r').read()
341
343
342 for mf, cmd in self.encodepats:
344 for mf, cmd in self.encodepats:
343 if mf(filename):
345 if mf(filename):
344 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
346 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
345 data = util.filter(data, cmd)
347 data = util.filter(data, cmd)
346 break
348 break
347
349
348 return data
350 return data
349
351
350 def wwrite(self, filename, data, fd=None):
352 def wwrite(self, filename, data, fd=None):
351 if self.decodepats == None:
353 if self.decodepats == None:
352 l = []
354 l = []
353 for pat, cmd in self.ui.configitems("decode"):
355 for pat, cmd in self.ui.configitems("decode"):
354 mf = util.matcher(self.root, "", [pat], [], [])[1]
356 mf = util.matcher(self.root, "", [pat], [], [])[1]
355 l.append((mf, cmd))
357 l.append((mf, cmd))
356 self.decodepats = l
358 self.decodepats = l
357
359
358 for mf, cmd in self.decodepats:
360 for mf, cmd in self.decodepats:
359 if mf(filename):
361 if mf(filename):
360 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
362 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
361 data = util.filter(data, cmd)
363 data = util.filter(data, cmd)
362 break
364 break
363
365
364 if fd:
366 if fd:
365 return fd.write(data)
367 return fd.write(data)
366 return self.wopener(filename, 'w').write(data)
368 return self.wopener(filename, 'w').write(data)
367
369
368 def transaction(self):
370 def transaction(self):
369 tr = self.transhandle
371 tr = self.transhandle
370 if tr != None and tr.running():
372 if tr != None and tr.running():
371 return tr.nest()
373 return tr.nest()
372
374
373 # save dirstate for rollback
375 # save dirstate for rollback
374 try:
376 try:
375 ds = self.opener("dirstate").read()
377 ds = self.opener("dirstate").read()
376 except IOError:
378 except IOError:
377 ds = ""
379 ds = ""
378 self.opener("journal.dirstate", "w").write(ds)
380 self.opener("journal.dirstate", "w").write(ds)
379
381
380 tr = transaction.transaction(self.ui.warn, self.opener,
382 tr = transaction.transaction(self.ui.warn, self.opener,
381 self.join("journal"),
383 self.join("journal"),
382 aftertrans(self.path))
384 aftertrans(self.path))
383 self.transhandle = tr
385 self.transhandle = tr
384 return tr
386 return tr
385
387
386 def recover(self):
388 def recover(self):
387 l = self.lock()
389 l = self.lock()
388 if os.path.exists(self.join("journal")):
390 if os.path.exists(self.join("journal")):
389 self.ui.status(_("rolling back interrupted transaction\n"))
391 self.ui.status(_("rolling back interrupted transaction\n"))
390 transaction.rollback(self.opener, self.join("journal"))
392 transaction.rollback(self.opener, self.join("journal"))
391 self.reload()
393 self.reload()
392 return True
394 return True
393 else:
395 else:
394 self.ui.warn(_("no interrupted transaction available\n"))
396 self.ui.warn(_("no interrupted transaction available\n"))
395 return False
397 return False
396
398
397 def rollback(self, wlock=None):
399 def rollback(self, wlock=None):
398 if not wlock:
400 if not wlock:
399 wlock = self.wlock()
401 wlock = self.wlock()
400 l = self.lock()
402 l = self.lock()
401 if os.path.exists(self.join("undo")):
403 if os.path.exists(self.join("undo")):
402 self.ui.status(_("rolling back last transaction\n"))
404 self.ui.status(_("rolling back last transaction\n"))
403 transaction.rollback(self.opener, self.join("undo"))
405 transaction.rollback(self.opener, self.join("undo"))
404 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
406 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
405 self.reload()
407 self.reload()
406 self.wreload()
408 self.wreload()
407 else:
409 else:
408 self.ui.warn(_("no rollback information available\n"))
410 self.ui.warn(_("no rollback information available\n"))
409
411
410 def wreload(self):
412 def wreload(self):
411 self.dirstate.read()
413 self.dirstate.read()
412
414
413 def reload(self):
415 def reload(self):
414 self.changelog.load()
416 self.changelog.load()
415 self.manifest.load()
417 self.manifest.load()
416 self.tagscache = None
418 self.tagscache = None
417 self.nodetagscache = None
419 self.nodetagscache = None
418
420
419 def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
421 def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
420 desc=None):
422 desc=None):
421 try:
423 try:
422 l = lock.lock(self.join(lockname), 0, releasefn, desc=desc)
424 l = lock.lock(self.join(lockname), 0, releasefn, desc=desc)
423 except lock.LockHeld, inst:
425 except lock.LockHeld, inst:
424 if not wait:
426 if not wait:
425 raise
427 raise
426 self.ui.warn(_("waiting for lock on %s held by %s\n") %
428 self.ui.warn(_("waiting for lock on %s held by %s\n") %
427 (desc, inst.args[0]))
429 (desc, inst.args[0]))
428 # default to 600 seconds timeout
430 # default to 600 seconds timeout
429 l = lock.lock(self.join(lockname),
431 l = lock.lock(self.join(lockname),
430 int(self.ui.config("ui", "timeout") or 600),
432 int(self.ui.config("ui", "timeout") or 600),
431 releasefn, desc=desc)
433 releasefn, desc=desc)
432 if acquirefn:
434 if acquirefn:
433 acquirefn()
435 acquirefn()
434 return l
436 return l
435
437
436 def lock(self, wait=1):
438 def lock(self, wait=1):
437 return self.do_lock("lock", wait, acquirefn=self.reload,
439 return self.do_lock("lock", wait, acquirefn=self.reload,
438 desc=_('repository %s') % self.origroot)
440 desc=_('repository %s') % self.origroot)
439
441
440 def wlock(self, wait=1):
442 def wlock(self, wait=1):
441 return self.do_lock("wlock", wait, self.dirstate.write,
443 return self.do_lock("wlock", wait, self.dirstate.write,
442 self.wreload,
444 self.wreload,
443 desc=_('working directory of %s') % self.origroot)
445 desc=_('working directory of %s') % self.origroot)
444
446
445 def checkfilemerge(self, filename, text, filelog, manifest1, manifest2):
447 def checkfilemerge(self, filename, text, filelog, manifest1, manifest2):
446 "determine whether a new filenode is needed"
448 "determine whether a new filenode is needed"
447 fp1 = manifest1.get(filename, nullid)
449 fp1 = manifest1.get(filename, nullid)
448 fp2 = manifest2.get(filename, nullid)
450 fp2 = manifest2.get(filename, nullid)
449
451
450 if fp2 != nullid:
452 if fp2 != nullid:
451 # is one parent an ancestor of the other?
453 # is one parent an ancestor of the other?
452 fpa = filelog.ancestor(fp1, fp2)
454 fpa = filelog.ancestor(fp1, fp2)
453 if fpa == fp1:
455 if fpa == fp1:
454 fp1, fp2 = fp2, nullid
456 fp1, fp2 = fp2, nullid
455 elif fpa == fp2:
457 elif fpa == fp2:
456 fp2 = nullid
458 fp2 = nullid
457
459
458 # is the file unmodified from the parent? report existing entry
460 # is the file unmodified from the parent? report existing entry
459 if fp2 == nullid and text == filelog.read(fp1):
461 if fp2 == nullid and text == filelog.read(fp1):
460 return (fp1, None, None)
462 return (fp1, None, None)
461
463
462 return (None, fp1, fp2)
464 return (None, fp1, fp2)
463
465
464 def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None):
466 def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None):
465 orig_parent = self.dirstate.parents()[0] or nullid
467 orig_parent = self.dirstate.parents()[0] or nullid
466 p1 = p1 or self.dirstate.parents()[0] or nullid
468 p1 = p1 or self.dirstate.parents()[0] or nullid
467 p2 = p2 or self.dirstate.parents()[1] or nullid
469 p2 = p2 or self.dirstate.parents()[1] or nullid
468 c1 = self.changelog.read(p1)
470 c1 = self.changelog.read(p1)
469 c2 = self.changelog.read(p2)
471 c2 = self.changelog.read(p2)
470 m1 = self.manifest.read(c1[0]).copy()
472 m1 = self.manifest.read(c1[0]).copy()
471 m2 = self.manifest.read(c2[0])
473 m2 = self.manifest.read(c2[0])
472 changed = []
474 changed = []
473
475
474 if orig_parent == p1:
476 if orig_parent == p1:
475 update_dirstate = 1
477 update_dirstate = 1
476 else:
478 else:
477 update_dirstate = 0
479 update_dirstate = 0
478
480
479 if not wlock:
481 if not wlock:
480 wlock = self.wlock()
482 wlock = self.wlock()
481 l = self.lock()
483 l = self.lock()
482 tr = self.transaction()
484 tr = self.transaction()
483 linkrev = self.changelog.count()
485 linkrev = self.changelog.count()
484 for f in files:
486 for f in files:
485 try:
487 try:
486 t = self.wread(f)
488 t = self.wread(f)
487 m1.set(f, util.is_exec(self.wjoin(f), m1.execf(f)))
489 m1.set(f, util.is_exec(self.wjoin(f), m1.execf(f)))
488 r = self.file(f)
490 r = self.file(f)
489
491
490 (entry, fp1, fp2) = self.checkfilemerge(f, t, r, m1, m2)
492 (entry, fp1, fp2) = self.checkfilemerge(f, t, r, m1, m2)
491 if entry:
493 if entry:
492 m1[f] = entry
494 m1[f] = entry
493 continue
495 continue
494
496
495 m1[f] = r.add(t, {}, tr, linkrev, fp1, fp2)
497 m1[f] = r.add(t, {}, tr, linkrev, fp1, fp2)
496 changed.append(f)
498 changed.append(f)
497 if update_dirstate:
499 if update_dirstate:
498 self.dirstate.update([f], "n")
500 self.dirstate.update([f], "n")
499 except IOError:
501 except IOError:
500 try:
502 try:
501 del m1[f]
503 del m1[f]
502 if update_dirstate:
504 if update_dirstate:
503 self.dirstate.forget([f])
505 self.dirstate.forget([f])
504 except:
506 except:
505 # deleted from p2?
507 # deleted from p2?
506 pass
508 pass
507
509
508 mnode = self.manifest.add(m1, tr, linkrev, c1[0], c2[0])
510 mnode = self.manifest.add(m1, tr, linkrev, c1[0], c2[0])
509 user = user or self.ui.username()
511 user = user or self.ui.username()
510 n = self.changelog.add(mnode, changed, text, tr, p1, p2, user, date)
512 n = self.changelog.add(mnode, changed, text, tr, p1, p2, user, date)
511 tr.close()
513 tr.close()
512 if update_dirstate:
514 if update_dirstate:
513 self.dirstate.setparents(n, nullid)
515 self.dirstate.setparents(n, nullid)
514
516
515 def commit(self, files=None, text="", user=None, date=None,
517 def commit(self, files=None, text="", user=None, date=None,
516 match=util.always, force=False, lock=None, wlock=None,
518 match=util.always, force=False, lock=None, wlock=None,
517 force_editor=False):
519 force_editor=False):
518 commit = []
520 commit = []
519 remove = []
521 remove = []
520 changed = []
522 changed = []
521
523
522 if files:
524 if files:
523 for f in files:
525 for f in files:
524 s = self.dirstate.state(f)
526 s = self.dirstate.state(f)
525 if s in 'nmai':
527 if s in 'nmai':
526 commit.append(f)
528 commit.append(f)
527 elif s == 'r':
529 elif s == 'r':
528 remove.append(f)
530 remove.append(f)
529 else:
531 else:
530 self.ui.warn(_("%s not tracked!\n") % f)
532 self.ui.warn(_("%s not tracked!\n") % f)
531 else:
533 else:
532 modified, added, removed, deleted, unknown = self.status(match=match)[:5]
534 modified, added, removed, deleted, unknown = self.status(match=match)[:5]
533 commit = modified + added
535 commit = modified + added
534 remove = removed
536 remove = removed
535
537
536 p1, p2 = self.dirstate.parents()
538 p1, p2 = self.dirstate.parents()
537 c1 = self.changelog.read(p1)
539 c1 = self.changelog.read(p1)
538 c2 = self.changelog.read(p2)
540 c2 = self.changelog.read(p2)
539 m1 = self.manifest.read(c1[0]).copy()
541 m1 = self.manifest.read(c1[0]).copy()
540 m2 = self.manifest.read(c2[0])
542 m2 = self.manifest.read(c2[0])
541
543
542 if not commit and not remove and not force and p2 == nullid:
544 if not commit and not remove and not force and p2 == nullid:
543 self.ui.status(_("nothing changed\n"))
545 self.ui.status(_("nothing changed\n"))
544 return None
546 return None
545
547
546 xp1 = hex(p1)
548 xp1 = hex(p1)
547 if p2 == nullid: xp2 = ''
549 if p2 == nullid: xp2 = ''
548 else: xp2 = hex(p2)
550 else: xp2 = hex(p2)
549
551
550 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
552 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
551
553
552 if not wlock:
554 if not wlock:
553 wlock = self.wlock()
555 wlock = self.wlock()
554 if not lock:
556 if not lock:
555 lock = self.lock()
557 lock = self.lock()
556 tr = self.transaction()
558 tr = self.transaction()
557
559
558 # check in files
560 # check in files
559 new = {}
561 new = {}
560 linkrev = self.changelog.count()
562 linkrev = self.changelog.count()
561 commit.sort()
563 commit.sort()
562 for f in commit:
564 for f in commit:
563 self.ui.note(f + "\n")
565 self.ui.note(f + "\n")
564 try:
566 try:
565 m1.set(f, util.is_exec(self.wjoin(f), m1.execf(f)))
567 m1.set(f, util.is_exec(self.wjoin(f), m1.execf(f)))
566 t = self.wread(f)
568 t = self.wread(f)
567 except IOError:
569 except IOError:
568 self.ui.warn(_("trouble committing %s!\n") % f)
570 self.ui.warn(_("trouble committing %s!\n") % f)
569 raise
571 raise
570
572
571 r = self.file(f)
573 r = self.file(f)
572
574
573 meta = {}
575 meta = {}
574 cp = self.dirstate.copied(f)
576 cp = self.dirstate.copied(f)
575 if cp:
577 if cp:
576 meta["copy"] = cp
578 meta["copy"] = cp
577 meta["copyrev"] = hex(m1.get(cp, m2.get(cp, nullid)))
579 meta["copyrev"] = hex(m1.get(cp, m2.get(cp, nullid)))
578 self.ui.debug(_(" %s: copy %s:%s\n") % (f, cp, meta["copyrev"]))
580 self.ui.debug(_(" %s: copy %s:%s\n") % (f, cp, meta["copyrev"]))
579 fp1, fp2 = nullid, nullid
581 fp1, fp2 = nullid, nullid
580 else:
582 else:
581 entry, fp1, fp2 = self.checkfilemerge(f, t, r, m1, m2)
583 entry, fp1, fp2 = self.checkfilemerge(f, t, r, m1, m2)
582 if entry:
584 if entry:
583 new[f] = entry
585 new[f] = entry
584 continue
586 continue
585
587
586 new[f] = r.add(t, meta, tr, linkrev, fp1, fp2)
588 new[f] = r.add(t, meta, tr, linkrev, fp1, fp2)
587 # remember what we've added so that we can later calculate
589 # remember what we've added so that we can later calculate
588 # the files to pull from a set of changesets
590 # the files to pull from a set of changesets
589 changed.append(f)
591 changed.append(f)
590
592
591 # update manifest
593 # update manifest
592 m1.update(new)
594 m1.update(new)
593 for f in remove:
595 for f in remove:
594 if f in m1:
596 if f in m1:
595 del m1[f]
597 del m1[f]
596 mn = self.manifest.add(m1, tr, linkrev, c1[0], c2[0],
598 mn = self.manifest.add(m1, tr, linkrev, c1[0], c2[0],
597 (new, remove))
599 (new, remove))
598
600
599 # add changeset
601 # add changeset
600 new = new.keys()
602 new = new.keys()
601 new.sort()
603 new.sort()
602
604
603 user = user or self.ui.username()
605 user = user or self.ui.username()
604 if not text or force_editor:
606 if not text or force_editor:
605 edittext = []
607 edittext = []
606 if text:
608 if text:
607 edittext.append(text)
609 edittext.append(text)
608 edittext.append("")
610 edittext.append("")
609 if p2 != nullid:
611 if p2 != nullid:
610 edittext.append("HG: branch merge")
612 edittext.append("HG: branch merge")
611 edittext.extend(["HG: changed %s" % f for f in changed])
613 edittext.extend(["HG: changed %s" % f for f in changed])
612 edittext.extend(["HG: removed %s" % f for f in remove])
614 edittext.extend(["HG: removed %s" % f for f in remove])
613 if not changed and not remove:
615 if not changed and not remove:
614 edittext.append("HG: no files changed")
616 edittext.append("HG: no files changed")
615 edittext.append("")
617 edittext.append("")
616 # run editor in the repository root
618 # run editor in the repository root
617 olddir = os.getcwd()
619 olddir = os.getcwd()
618 os.chdir(self.root)
620 os.chdir(self.root)
619 text = self.ui.edit("\n".join(edittext), user)
621 text = self.ui.edit("\n".join(edittext), user)
620 os.chdir(olddir)
622 os.chdir(olddir)
621
623
622 lines = [line.rstrip() for line in text.rstrip().splitlines()]
624 lines = [line.rstrip() for line in text.rstrip().splitlines()]
623 while lines and not lines[0]:
625 while lines and not lines[0]:
624 del lines[0]
626 del lines[0]
625 if not lines:
627 if not lines:
626 return None
628 return None
627 text = '\n'.join(lines)
629 text = '\n'.join(lines)
628 n = self.changelog.add(mn, changed + remove, text, tr, p1, p2, user, date)
630 n = self.changelog.add(mn, changed + remove, text, tr, p1, p2, user, date)
629 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
631 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
630 parent2=xp2)
632 parent2=xp2)
631 tr.close()
633 tr.close()
632
634
633 self.dirstate.setparents(n)
635 self.dirstate.setparents(n)
634 self.dirstate.update(new, "n")
636 self.dirstate.update(new, "n")
635 self.dirstate.forget(remove)
637 self.dirstate.forget(remove)
636
638
637 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
639 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
638 return n
640 return n
639
641
640 def walk(self, node=None, files=[], match=util.always, badmatch=None):
642 def walk(self, node=None, files=[], match=util.always, badmatch=None):
641 if node:
643 if node:
642 fdict = dict.fromkeys(files)
644 fdict = dict.fromkeys(files)
643 for fn in self.manifest.read(self.changelog.read(node)[0]):
645 for fn in self.manifest.read(self.changelog.read(node)[0]):
644 for ffn in fdict:
646 for ffn in fdict:
645 # match if the file is the exact name or a directory
647 # match if the file is the exact name or a directory
646 if ffn == fn or fn.startswith("%s/" % ffn):
648 if ffn == fn or fn.startswith("%s/" % ffn):
647 del fdict[ffn]
649 del fdict[ffn]
648 break
650 break
649 if match(fn):
651 if match(fn):
650 yield 'm', fn
652 yield 'm', fn
651 for fn in fdict:
653 for fn in fdict:
652 if badmatch and badmatch(fn):
654 if badmatch and badmatch(fn):
653 if match(fn):
655 if match(fn):
654 yield 'b', fn
656 yield 'b', fn
655 else:
657 else:
656 self.ui.warn(_('%s: No such file in rev %s\n') % (
658 self.ui.warn(_('%s: No such file in rev %s\n') % (
657 util.pathto(self.getcwd(), fn), short(node)))
659 util.pathto(self.getcwd(), fn), short(node)))
658 else:
660 else:
659 for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
661 for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
660 yield src, fn
662 yield src, fn
661
663
662 def status(self, node1=None, node2=None, files=[], match=util.always,
664 def status(self, node1=None, node2=None, files=[], match=util.always,
663 wlock=None, list_ignored=False, list_clean=False):
665 wlock=None, list_ignored=False, list_clean=False):
664 """return status of files between two nodes or node and working directory
666 """return status of files between two nodes or node and working directory
665
667
666 If node1 is None, use the first dirstate parent instead.
668 If node1 is None, use the first dirstate parent instead.
667 If node2 is None, compare node1 with working directory.
669 If node2 is None, compare node1 with working directory.
668 """
670 """
669
671
670 def fcmp(fn, mf):
672 def fcmp(fn, mf):
671 t1 = self.wread(fn)
673 t1 = self.wread(fn)
672 return self.file(fn).cmp(mf.get(fn, nullid), t1)
674 return self.file(fn).cmp(mf.get(fn, nullid), t1)
673
675
674 def mfmatches(node):
676 def mfmatches(node):
675 change = self.changelog.read(node)
677 change = self.changelog.read(node)
676 mf = dict(self.manifest.read(change[0]))
678 mf = dict(self.manifest.read(change[0]))
677 for fn in mf.keys():
679 for fn in mf.keys():
678 if not match(fn):
680 if not match(fn):
679 del mf[fn]
681 del mf[fn]
680 return mf
682 return mf
681
683
682 modified, added, removed, deleted, unknown = [], [], [], [], []
684 modified, added, removed, deleted, unknown = [], [], [], [], []
683 ignored, clean = [], []
685 ignored, clean = [], []
684
686
685 compareworking = False
687 compareworking = False
686 if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
688 if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
687 compareworking = True
689 compareworking = True
688
690
689 if not compareworking:
691 if not compareworking:
690 # read the manifest from node1 before the manifest from node2,
692 # read the manifest from node1 before the manifest from node2,
691 # so that we'll hit the manifest cache if we're going through
693 # so that we'll hit the manifest cache if we're going through
692 # all the revisions in parent->child order.
694 # all the revisions in parent->child order.
693 mf1 = mfmatches(node1)
695 mf1 = mfmatches(node1)
694
696
695 # are we comparing the working directory?
697 # are we comparing the working directory?
696 if not node2:
698 if not node2:
697 if not wlock:
699 if not wlock:
698 try:
700 try:
699 wlock = self.wlock(wait=0)
701 wlock = self.wlock(wait=0)
700 except lock.LockException:
702 except lock.LockException:
701 wlock = None
703 wlock = None
702 (lookup, modified, added, removed, deleted, unknown,
704 (lookup, modified, added, removed, deleted, unknown,
703 ignored, clean) = self.dirstate.status(files, match,
705 ignored, clean) = self.dirstate.status(files, match,
704 list_ignored, list_clean)
706 list_ignored, list_clean)
705
707
706 # are we comparing working dir against its parent?
708 # are we comparing working dir against its parent?
707 if compareworking:
709 if compareworking:
708 if lookup:
710 if lookup:
709 # do a full compare of any files that might have changed
711 # do a full compare of any files that might have changed
710 mf2 = mfmatches(self.dirstate.parents()[0])
712 mf2 = mfmatches(self.dirstate.parents()[0])
711 for f in lookup:
713 for f in lookup:
712 if fcmp(f, mf2):
714 if fcmp(f, mf2):
713 modified.append(f)
715 modified.append(f)
714 else:
716 else:
715 clean.append(f)
717 clean.append(f)
716 if wlock is not None:
718 if wlock is not None:
717 self.dirstate.update([f], "n")
719 self.dirstate.update([f], "n")
718 else:
720 else:
719 # we are comparing working dir against non-parent
721 # we are comparing working dir against non-parent
720 # generate a pseudo-manifest for the working dir
722 # generate a pseudo-manifest for the working dir
721 mf2 = mfmatches(self.dirstate.parents()[0])
723 mf2 = mfmatches(self.dirstate.parents()[0])
722 for f in lookup + modified + added:
724 for f in lookup + modified + added:
723 mf2[f] = ""
725 mf2[f] = ""
724 for f in removed:
726 for f in removed:
725 if f in mf2:
727 if f in mf2:
726 del mf2[f]
728 del mf2[f]
727 else:
729 else:
728 # we are comparing two revisions
730 # we are comparing two revisions
729 mf2 = mfmatches(node2)
731 mf2 = mfmatches(node2)
730
732
731 if not compareworking:
733 if not compareworking:
732 # flush lists from dirstate before comparing manifests
734 # flush lists from dirstate before comparing manifests
733 modified, added, clean = [], [], []
735 modified, added, clean = [], [], []
734
736
735 # make sure to sort the files so we talk to the disk in a
737 # make sure to sort the files so we talk to the disk in a
736 # reasonable order
738 # reasonable order
737 mf2keys = mf2.keys()
739 mf2keys = mf2.keys()
738 mf2keys.sort()
740 mf2keys.sort()
739 for fn in mf2keys:
741 for fn in mf2keys:
740 if mf1.has_key(fn):
742 if mf1.has_key(fn):
741 if mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1)):
743 if mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1)):
742 modified.append(fn)
744 modified.append(fn)
743 elif list_clean:
745 elif list_clean:
744 clean.append(fn)
746 clean.append(fn)
745 del mf1[fn]
747 del mf1[fn]
746 else:
748 else:
747 added.append(fn)
749 added.append(fn)
748
750
749 removed = mf1.keys()
751 removed = mf1.keys()
750
752
751 # sort and return results:
753 # sort and return results:
752 for l in modified, added, removed, deleted, unknown, ignored, clean:
754 for l in modified, added, removed, deleted, unknown, ignored, clean:
753 l.sort()
755 l.sort()
754 return (modified, added, removed, deleted, unknown, ignored, clean)
756 return (modified, added, removed, deleted, unknown, ignored, clean)
755
757
756 def add(self, list, wlock=None):
758 def add(self, list, wlock=None):
757 if not wlock:
759 if not wlock:
758 wlock = self.wlock()
760 wlock = self.wlock()
759 for f in list:
761 for f in list:
760 p = self.wjoin(f)
762 p = self.wjoin(f)
761 if not os.path.exists(p):
763 if not os.path.exists(p):
762 self.ui.warn(_("%s does not exist!\n") % f)
764 self.ui.warn(_("%s does not exist!\n") % f)
763 elif not os.path.isfile(p):
765 elif not os.path.isfile(p):
764 self.ui.warn(_("%s not added: only files supported currently\n")
766 self.ui.warn(_("%s not added: only files supported currently\n")
765 % f)
767 % f)
766 elif self.dirstate.state(f) in 'an':
768 elif self.dirstate.state(f) in 'an':
767 self.ui.warn(_("%s already tracked!\n") % f)
769 self.ui.warn(_("%s already tracked!\n") % f)
768 else:
770 else:
769 self.dirstate.update([f], "a")
771 self.dirstate.update([f], "a")
770
772
771 def forget(self, list, wlock=None):
773 def forget(self, list, wlock=None):
772 if not wlock:
774 if not wlock:
773 wlock = self.wlock()
775 wlock = self.wlock()
774 for f in list:
776 for f in list:
775 if self.dirstate.state(f) not in 'ai':
777 if self.dirstate.state(f) not in 'ai':
776 self.ui.warn(_("%s not added!\n") % f)
778 self.ui.warn(_("%s not added!\n") % f)
777 else:
779 else:
778 self.dirstate.forget([f])
780 self.dirstate.forget([f])
779
781
780 def remove(self, list, unlink=False, wlock=None):
782 def remove(self, list, unlink=False, wlock=None):
781 if unlink:
783 if unlink:
782 for f in list:
784 for f in list:
783 try:
785 try:
784 util.unlink(self.wjoin(f))
786 util.unlink(self.wjoin(f))
785 except OSError, inst:
787 except OSError, inst:
786 if inst.errno != errno.ENOENT:
788 if inst.errno != errno.ENOENT:
787 raise
789 raise
788 if not wlock:
790 if not wlock:
789 wlock = self.wlock()
791 wlock = self.wlock()
790 for f in list:
792 for f in list:
791 p = self.wjoin(f)
793 p = self.wjoin(f)
792 if os.path.exists(p):
794 if os.path.exists(p):
793 self.ui.warn(_("%s still exists!\n") % f)
795 self.ui.warn(_("%s still exists!\n") % f)
794 elif self.dirstate.state(f) == 'a':
796 elif self.dirstate.state(f) == 'a':
795 self.dirstate.forget([f])
797 self.dirstate.forget([f])
796 elif f not in self.dirstate:
798 elif f not in self.dirstate:
797 self.ui.warn(_("%s not tracked!\n") % f)
799 self.ui.warn(_("%s not tracked!\n") % f)
798 else:
800 else:
799 self.dirstate.update([f], "r")
801 self.dirstate.update([f], "r")
800
802
801 def undelete(self, list, wlock=None):
803 def undelete(self, list, wlock=None):
802 p = self.dirstate.parents()[0]
804 p = self.dirstate.parents()[0]
803 mn = self.changelog.read(p)[0]
805 mn = self.changelog.read(p)[0]
804 m = self.manifest.read(mn)
806 m = self.manifest.read(mn)
805 if not wlock:
807 if not wlock:
806 wlock = self.wlock()
808 wlock = self.wlock()
807 for f in list:
809 for f in list:
808 if self.dirstate.state(f) not in "r":
810 if self.dirstate.state(f) not in "r":
809 self.ui.warn("%s not removed!\n" % f)
811 self.ui.warn("%s not removed!\n" % f)
810 else:
812 else:
811 t = self.file(f).read(m[f])
813 t = self.file(f).read(m[f])
812 self.wwrite(f, t)
814 self.wwrite(f, t)
813 util.set_exec(self.wjoin(f), m.execf(f))
815 util.set_exec(self.wjoin(f), m.execf(f))
814 self.dirstate.update([f], "n")
816 self.dirstate.update([f], "n")
815
817
816 def copy(self, source, dest, wlock=None):
818 def copy(self, source, dest, wlock=None):
817 p = self.wjoin(dest)
819 p = self.wjoin(dest)
818 if not os.path.exists(p):
820 if not os.path.exists(p):
819 self.ui.warn(_("%s does not exist!\n") % dest)
821 self.ui.warn(_("%s does not exist!\n") % dest)
820 elif not os.path.isfile(p):
822 elif not os.path.isfile(p):
821 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
823 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
822 else:
824 else:
823 if not wlock:
825 if not wlock:
824 wlock = self.wlock()
826 wlock = self.wlock()
825 if self.dirstate.state(dest) == '?':
827 if self.dirstate.state(dest) == '?':
826 self.dirstate.update([dest], "a")
828 self.dirstate.update([dest], "a")
827 self.dirstate.copy(source, dest)
829 self.dirstate.copy(source, dest)
828
830
829 def heads(self, start=None):
831 def heads(self, start=None):
830 heads = self.changelog.heads(start)
832 heads = self.changelog.heads(start)
831 # sort the output in rev descending order
833 # sort the output in rev descending order
832 heads = [(-self.changelog.rev(h), h) for h in heads]
834 heads = [(-self.changelog.rev(h), h) for h in heads]
833 heads.sort()
835 heads.sort()
834 return [n for (r, n) in heads]
836 return [n for (r, n) in heads]
835
837
836 # branchlookup returns a dict giving a list of branches for
838 # branchlookup returns a dict giving a list of branches for
837 # each head. A branch is defined as the tag of a node or
839 # each head. A branch is defined as the tag of a node or
838 # the branch of the node's parents. If a node has multiple
840 # the branch of the node's parents. If a node has multiple
839 # branch tags, tags are eliminated if they are visible from other
841 # branch tags, tags are eliminated if they are visible from other
840 # branch tags.
842 # branch tags.
841 #
843 #
842 # So, for this graph: a->b->c->d->e
844 # So, for this graph: a->b->c->d->e
843 # \ /
845 # \ /
844 # aa -----/
846 # aa -----/
845 # a has tag 2.6.12
847 # a has tag 2.6.12
846 # d has tag 2.6.13
848 # d has tag 2.6.13
847 # e would have branch tags for 2.6.12 and 2.6.13. Because the node
849 # e would have branch tags for 2.6.12 and 2.6.13. Because the node
848 # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
850 # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
849 # from the list.
851 # from the list.
850 #
852 #
851 # It is possible that more than one head will have the same branch tag.
853 # It is possible that more than one head will have the same branch tag.
852 # callers need to check the result for multiple heads under the same
854 # callers need to check the result for multiple heads under the same
853 # branch tag if that is a problem for them (ie checkout of a specific
855 # branch tag if that is a problem for them (ie checkout of a specific
854 # branch).
856 # branch).
855 #
857 #
856 # passing in a specific branch will limit the depth of the search
858 # passing in a specific branch will limit the depth of the search
857 # through the parents. It won't limit the branches returned in the
859 # through the parents. It won't limit the branches returned in the
858 # result though.
860 # result though.
859 def branchlookup(self, heads=None, branch=None):
861 def branchlookup(self, heads=None, branch=None):
860 if not heads:
862 if not heads:
861 heads = self.heads()
863 heads = self.heads()
862 headt = [ h for h in heads ]
864 headt = [ h for h in heads ]
863 chlog = self.changelog
865 chlog = self.changelog
864 branches = {}
866 branches = {}
865 merges = []
867 merges = []
866 seenmerge = {}
868 seenmerge = {}
867
869
868 # traverse the tree once for each head, recording in the branches
870 # traverse the tree once for each head, recording in the branches
869 # dict which tags are visible from this head. The branches
871 # dict which tags are visible from this head. The branches
870 # dict also records which tags are visible from each tag
872 # dict also records which tags are visible from each tag
871 # while we traverse.
873 # while we traverse.
872 while headt or merges:
874 while headt or merges:
873 if merges:
875 if merges:
874 n, found = merges.pop()
876 n, found = merges.pop()
875 visit = [n]
877 visit = [n]
876 else:
878 else:
877 h = headt.pop()
879 h = headt.pop()
878 visit = [h]
880 visit = [h]
879 found = [h]
881 found = [h]
880 seen = {}
882 seen = {}
881 while visit:
883 while visit:
882 n = visit.pop()
884 n = visit.pop()
883 if n in seen:
885 if n in seen:
884 continue
886 continue
885 pp = chlog.parents(n)
887 pp = chlog.parents(n)
886 tags = self.nodetags(n)
888 tags = self.nodetags(n)
887 if tags:
889 if tags:
888 for x in tags:
890 for x in tags:
889 if x == 'tip':
891 if x == 'tip':
890 continue
892 continue
891 for f in found:
893 for f in found:
892 branches.setdefault(f, {})[n] = 1
894 branches.setdefault(f, {})[n] = 1
893 branches.setdefault(n, {})[n] = 1
895 branches.setdefault(n, {})[n] = 1
894 break
896 break
895 if n not in found:
897 if n not in found:
896 found.append(n)
898 found.append(n)
897 if branch in tags:
899 if branch in tags:
898 continue
900 continue
899 seen[n] = 1
901 seen[n] = 1
900 if pp[1] != nullid and n not in seenmerge:
902 if pp[1] != nullid and n not in seenmerge:
901 merges.append((pp[1], [x for x in found]))
903 merges.append((pp[1], [x for x in found]))
902 seenmerge[n] = 1
904 seenmerge[n] = 1
903 if pp[0] != nullid:
905 if pp[0] != nullid:
904 visit.append(pp[0])
906 visit.append(pp[0])
905 # traverse the branches dict, eliminating branch tags from each
907 # traverse the branches dict, eliminating branch tags from each
906 # head that are visible from another branch tag for that head.
908 # head that are visible from another branch tag for that head.
907 out = {}
909 out = {}
908 viscache = {}
910 viscache = {}
909 for h in heads:
911 for h in heads:
910 def visible(node):
912 def visible(node):
911 if node in viscache:
913 if node in viscache:
912 return viscache[node]
914 return viscache[node]
913 ret = {}
915 ret = {}
914 visit = [node]
916 visit = [node]
915 while visit:
917 while visit:
916 x = visit.pop()
918 x = visit.pop()
917 if x in viscache:
919 if x in viscache:
918 ret.update(viscache[x])
920 ret.update(viscache[x])
919 elif x not in ret:
921 elif x not in ret:
920 ret[x] = 1
922 ret[x] = 1
921 if x in branches:
923 if x in branches:
922 visit[len(visit):] = branches[x].keys()
924 visit[len(visit):] = branches[x].keys()
923 viscache[node] = ret
925 viscache[node] = ret
924 return ret
926 return ret
925 if h not in branches:
927 if h not in branches:
926 continue
928 continue
927 # O(n^2), but somewhat limited. This only searches the
929 # O(n^2), but somewhat limited. This only searches the
928 # tags visible from a specific head, not all the tags in the
930 # tags visible from a specific head, not all the tags in the
929 # whole repo.
931 # whole repo.
930 for b in branches[h]:
932 for b in branches[h]:
931 vis = False
933 vis = False
932 for bb in branches[h].keys():
934 for bb in branches[h].keys():
933 if b != bb:
935 if b != bb:
934 if b in visible(bb):
936 if b in visible(bb):
935 vis = True
937 vis = True
936 break
938 break
937 if not vis:
939 if not vis:
938 l = out.setdefault(h, [])
940 l = out.setdefault(h, [])
939 l[len(l):] = self.nodetags(b)
941 l[len(l):] = self.nodetags(b)
940 return out
942 return out
941
943
942 def branches(self, nodes):
944 def branches(self, nodes):
943 if not nodes:
945 if not nodes:
944 nodes = [self.changelog.tip()]
946 nodes = [self.changelog.tip()]
945 b = []
947 b = []
946 for n in nodes:
948 for n in nodes:
947 t = n
949 t = n
948 while 1:
950 while 1:
949 p = self.changelog.parents(n)
951 p = self.changelog.parents(n)
950 if p[1] != nullid or p[0] == nullid:
952 if p[1] != nullid or p[0] == nullid:
951 b.append((t, n, p[0], p[1]))
953 b.append((t, n, p[0], p[1]))
952 break
954 break
953 n = p[0]
955 n = p[0]
954 return b
956 return b
955
957
956 def between(self, pairs):
958 def between(self, pairs):
957 r = []
959 r = []
958
960
959 for top, bottom in pairs:
961 for top, bottom in pairs:
960 n, l, i = top, [], 0
962 n, l, i = top, [], 0
961 f = 1
963 f = 1
962
964
963 while n != bottom:
965 while n != bottom:
964 p = self.changelog.parents(n)[0]
966 p = self.changelog.parents(n)[0]
965 if i == f:
967 if i == f:
966 l.append(n)
968 l.append(n)
967 f = f * 2
969 f = f * 2
968 n = p
970 n = p
969 i += 1
971 i += 1
970
972
971 r.append(l)
973 r.append(l)
972
974
973 return r
975 return r
974
976
975 def findincoming(self, remote, base=None, heads=None, force=False):
977 def findincoming(self, remote, base=None, heads=None, force=False):
976 """Return list of roots of the subsets of missing nodes from remote
978 """Return list of roots of the subsets of missing nodes from remote
977
979
978 If base dict is specified, assume that these nodes and their parents
980 If base dict is specified, assume that these nodes and their parents
979 exist on the remote side and that no child of a node of base exists
981 exist on the remote side and that no child of a node of base exists
980 in both remote and self.
982 in both remote and self.
981 Furthermore base will be updated to include the nodes that exists
983 Furthermore base will be updated to include the nodes that exists
982 in self and remote but no children exists in self and remote.
984 in self and remote but no children exists in self and remote.
983 If a list of heads is specified, return only nodes which are heads
985 If a list of heads is specified, return only nodes which are heads
984 or ancestors of these heads.
986 or ancestors of these heads.
985
987
986 All the ancestors of base are in self and in remote.
988 All the ancestors of base are in self and in remote.
987 All the descendants of the list returned are missing in self.
989 All the descendants of the list returned are missing in self.
988 (and so we know that the rest of the nodes are missing in remote, see
990 (and so we know that the rest of the nodes are missing in remote, see
989 outgoing)
991 outgoing)
990 """
992 """
991 m = self.changelog.nodemap
993 m = self.changelog.nodemap
992 search = []
994 search = []
993 fetch = {}
995 fetch = {}
994 seen = {}
996 seen = {}
995 seenbranch = {}
997 seenbranch = {}
996 if base == None:
998 if base == None:
997 base = {}
999 base = {}
998
1000
999 if not heads:
1001 if not heads:
1000 heads = remote.heads()
1002 heads = remote.heads()
1001
1003
1002 if self.changelog.tip() == nullid:
1004 if self.changelog.tip() == nullid:
1003 base[nullid] = 1
1005 base[nullid] = 1
1004 if heads != [nullid]:
1006 if heads != [nullid]:
1005 return [nullid]
1007 return [nullid]
1006 return []
1008 return []
1007
1009
1008 # assume we're closer to the tip than the root
1010 # assume we're closer to the tip than the root
1009 # and start by examining the heads
1011 # and start by examining the heads
1010 self.ui.status(_("searching for changes\n"))
1012 self.ui.status(_("searching for changes\n"))
1011
1013
1012 unknown = []
1014 unknown = []
1013 for h in heads:
1015 for h in heads:
1014 if h not in m:
1016 if h not in m:
1015 unknown.append(h)
1017 unknown.append(h)
1016 else:
1018 else:
1017 base[h] = 1
1019 base[h] = 1
1018
1020
1019 if not unknown:
1021 if not unknown:
1020 return []
1022 return []
1021
1023
1022 req = dict.fromkeys(unknown)
1024 req = dict.fromkeys(unknown)
1023 reqcnt = 0
1025 reqcnt = 0
1024
1026
1025 # search through remote branches
1027 # search through remote branches
1026 # a 'branch' here is a linear segment of history, with four parts:
1028 # a 'branch' here is a linear segment of history, with four parts:
1027 # head, root, first parent, second parent
1029 # head, root, first parent, second parent
1028 # (a branch always has two parents (or none) by definition)
1030 # (a branch always has two parents (or none) by definition)
1029 unknown = remote.branches(unknown)
1031 unknown = remote.branches(unknown)
1030 while unknown:
1032 while unknown:
1031 r = []
1033 r = []
1032 while unknown:
1034 while unknown:
1033 n = unknown.pop(0)
1035 n = unknown.pop(0)
1034 if n[0] in seen:
1036 if n[0] in seen:
1035 continue
1037 continue
1036
1038
1037 self.ui.debug(_("examining %s:%s\n")
1039 self.ui.debug(_("examining %s:%s\n")
1038 % (short(n[0]), short(n[1])))
1040 % (short(n[0]), short(n[1])))
1039 if n[0] == nullid: # found the end of the branch
1041 if n[0] == nullid: # found the end of the branch
1040 pass
1042 pass
1041 elif n in seenbranch:
1043 elif n in seenbranch:
1042 self.ui.debug(_("branch already found\n"))
1044 self.ui.debug(_("branch already found\n"))
1043 continue
1045 continue
1044 elif n[1] and n[1] in m: # do we know the base?
1046 elif n[1] and n[1] in m: # do we know the base?
1045 self.ui.debug(_("found incomplete branch %s:%s\n")
1047 self.ui.debug(_("found incomplete branch %s:%s\n")
1046 % (short(n[0]), short(n[1])))
1048 % (short(n[0]), short(n[1])))
1047 search.append(n) # schedule branch range for scanning
1049 search.append(n) # schedule branch range for scanning
1048 seenbranch[n] = 1
1050 seenbranch[n] = 1
1049 else:
1051 else:
1050 if n[1] not in seen and n[1] not in fetch:
1052 if n[1] not in seen and n[1] not in fetch:
1051 if n[2] in m and n[3] in m:
1053 if n[2] in m and n[3] in m:
1052 self.ui.debug(_("found new changeset %s\n") %
1054 self.ui.debug(_("found new changeset %s\n") %
1053 short(n[1]))
1055 short(n[1]))
1054 fetch[n[1]] = 1 # earliest unknown
1056 fetch[n[1]] = 1 # earliest unknown
1055 for p in n[2:4]:
1057 for p in n[2:4]:
1056 if p in m:
1058 if p in m:
1057 base[p] = 1 # latest known
1059 base[p] = 1 # latest known
1058
1060
1059 for p in n[2:4]:
1061 for p in n[2:4]:
1060 if p not in req and p not in m:
1062 if p not in req and p not in m:
1061 r.append(p)
1063 r.append(p)
1062 req[p] = 1
1064 req[p] = 1
1063 seen[n[0]] = 1
1065 seen[n[0]] = 1
1064
1066
1065 if r:
1067 if r:
1066 reqcnt += 1
1068 reqcnt += 1
1067 self.ui.debug(_("request %d: %s\n") %
1069 self.ui.debug(_("request %d: %s\n") %
1068 (reqcnt, " ".join(map(short, r))))
1070 (reqcnt, " ".join(map(short, r))))
1069 for p in range(0, len(r), 10):
1071 for p in range(0, len(r), 10):
1070 for b in remote.branches(r[p:p+10]):
1072 for b in remote.branches(r[p:p+10]):
1071 self.ui.debug(_("received %s:%s\n") %
1073 self.ui.debug(_("received %s:%s\n") %
1072 (short(b[0]), short(b[1])))
1074 (short(b[0]), short(b[1])))
1073 unknown.append(b)
1075 unknown.append(b)
1074
1076
1075 # do binary search on the branches we found
1077 # do binary search on the branches we found
1076 while search:
1078 while search:
1077 n = search.pop(0)
1079 n = search.pop(0)
1078 reqcnt += 1
1080 reqcnt += 1
1079 l = remote.between([(n[0], n[1])])[0]
1081 l = remote.between([(n[0], n[1])])[0]
1080 l.append(n[1])
1082 l.append(n[1])
1081 p = n[0]
1083 p = n[0]
1082 f = 1
1084 f = 1
1083 for i in l:
1085 for i in l:
1084 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1086 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1085 if i in m:
1087 if i in m:
1086 if f <= 2:
1088 if f <= 2:
1087 self.ui.debug(_("found new branch changeset %s\n") %
1089 self.ui.debug(_("found new branch changeset %s\n") %
1088 short(p))
1090 short(p))
1089 fetch[p] = 1
1091 fetch[p] = 1
1090 base[i] = 1
1092 base[i] = 1
1091 else:
1093 else:
1092 self.ui.debug(_("narrowed branch search to %s:%s\n")
1094 self.ui.debug(_("narrowed branch search to %s:%s\n")
1093 % (short(p), short(i)))
1095 % (short(p), short(i)))
1094 search.append((p, i))
1096 search.append((p, i))
1095 break
1097 break
1096 p, f = i, f * 2
1098 p, f = i, f * 2
1097
1099
1098 # sanity check our fetch list
1100 # sanity check our fetch list
1099 for f in fetch.keys():
1101 for f in fetch.keys():
1100 if f in m:
1102 if f in m:
1101 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1103 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1102
1104
1103 if base.keys() == [nullid]:
1105 if base.keys() == [nullid]:
1104 if force:
1106 if force:
1105 self.ui.warn(_("warning: repository is unrelated\n"))
1107 self.ui.warn(_("warning: repository is unrelated\n"))
1106 else:
1108 else:
1107 raise util.Abort(_("repository is unrelated"))
1109 raise util.Abort(_("repository is unrelated"))
1108
1110
1109 self.ui.debug(_("found new changesets starting at ") +
1111 self.ui.debug(_("found new changesets starting at ") +
1110 " ".join([short(f) for f in fetch]) + "\n")
1112 " ".join([short(f) for f in fetch]) + "\n")
1111
1113
1112 self.ui.debug(_("%d total queries\n") % reqcnt)
1114 self.ui.debug(_("%d total queries\n") % reqcnt)
1113
1115
1114 return fetch.keys()
1116 return fetch.keys()
1115
1117
1116 def findoutgoing(self, remote, base=None, heads=None, force=False):
1118 def findoutgoing(self, remote, base=None, heads=None, force=False):
1117 """Return list of nodes that are roots of subsets not in remote
1119 """Return list of nodes that are roots of subsets not in remote
1118
1120
1119 If base dict is specified, assume that these nodes and their parents
1121 If base dict is specified, assume that these nodes and their parents
1120 exist on the remote side.
1122 exist on the remote side.
1121 If a list of heads is specified, return only nodes which are heads
1123 If a list of heads is specified, return only nodes which are heads
1122 or ancestors of these heads, and return a second element which
1124 or ancestors of these heads, and return a second element which
1123 contains all remote heads which get new children.
1125 contains all remote heads which get new children.
1124 """
1126 """
1125 if base == None:
1127 if base == None:
1126 base = {}
1128 base = {}
1127 self.findincoming(remote, base, heads, force=force)
1129 self.findincoming(remote, base, heads, force=force)
1128
1130
1129 self.ui.debug(_("common changesets up to ")
1131 self.ui.debug(_("common changesets up to ")
1130 + " ".join(map(short, base.keys())) + "\n")
1132 + " ".join(map(short, base.keys())) + "\n")
1131
1133
1132 remain = dict.fromkeys(self.changelog.nodemap)
1134 remain = dict.fromkeys(self.changelog.nodemap)
1133
1135
1134 # prune everything remote has from the tree
1136 # prune everything remote has from the tree
1135 del remain[nullid]
1137 del remain[nullid]
1136 remove = base.keys()
1138 remove = base.keys()
1137 while remove:
1139 while remove:
1138 n = remove.pop(0)
1140 n = remove.pop(0)
1139 if n in remain:
1141 if n in remain:
1140 del remain[n]
1142 del remain[n]
1141 for p in self.changelog.parents(n):
1143 for p in self.changelog.parents(n):
1142 remove.append(p)
1144 remove.append(p)
1143
1145
1144 # find every node whose parents have been pruned
1146 # find every node whose parents have been pruned
1145 subset = []
1147 subset = []
1146 # find every remote head that will get new children
1148 # find every remote head that will get new children
1147 updated_heads = {}
1149 updated_heads = {}
1148 for n in remain:
1150 for n in remain:
1149 p1, p2 = self.changelog.parents(n)
1151 p1, p2 = self.changelog.parents(n)
1150 if p1 not in remain and p2 not in remain:
1152 if p1 not in remain and p2 not in remain:
1151 subset.append(n)
1153 subset.append(n)
1152 if heads:
1154 if heads:
1153 if p1 in heads:
1155 if p1 in heads:
1154 updated_heads[p1] = True
1156 updated_heads[p1] = True
1155 if p2 in heads:
1157 if p2 in heads:
1156 updated_heads[p2] = True
1158 updated_heads[p2] = True
1157
1159
1158 # this is the set of all roots we have to push
1160 # this is the set of all roots we have to push
1159 if heads:
1161 if heads:
1160 return subset, updated_heads.keys()
1162 return subset, updated_heads.keys()
1161 else:
1163 else:
1162 return subset
1164 return subset
1163
1165
1164 def pull(self, remote, heads=None, force=False, lock=None):
1166 def pull(self, remote, heads=None, force=False, lock=None):
1165 mylock = False
1167 mylock = False
1166 if not lock:
1168 if not lock:
1167 lock = self.lock()
1169 lock = self.lock()
1168 mylock = True
1170 mylock = True
1169
1171
1170 try:
1172 try:
1171 fetch = self.findincoming(remote, force=force)
1173 fetch = self.findincoming(remote, force=force)
1172 if fetch == [nullid]:
1174 if fetch == [nullid]:
1173 self.ui.status(_("requesting all changes\n"))
1175 self.ui.status(_("requesting all changes\n"))
1174
1176
1175 if not fetch:
1177 if not fetch:
1176 self.ui.status(_("no changes found\n"))
1178 self.ui.status(_("no changes found\n"))
1177 return 0
1179 return 0
1178
1180
1179 if heads is None:
1181 if heads is None:
1180 cg = remote.changegroup(fetch, 'pull')
1182 cg = remote.changegroup(fetch, 'pull')
1181 else:
1183 else:
1182 cg = remote.changegroupsubset(fetch, heads, 'pull')
1184 cg = remote.changegroupsubset(fetch, heads, 'pull')
1183 return self.addchangegroup(cg, 'pull', remote.url())
1185 return self.addchangegroup(cg, 'pull', remote.url())
1184 finally:
1186 finally:
1185 if mylock:
1187 if mylock:
1186 lock.release()
1188 lock.release()
1187
1189
1188 def push(self, remote, force=False, revs=None):
1190 def push(self, remote, force=False, revs=None):
1189 # there are two ways to push to remote repo:
1191 # there are two ways to push to remote repo:
1190 #
1192 #
1191 # addchangegroup assumes local user can lock remote
1193 # addchangegroup assumes local user can lock remote
1192 # repo (local filesystem, old ssh servers).
1194 # repo (local filesystem, old ssh servers).
1193 #
1195 #
1194 # unbundle assumes local user cannot lock remote repo (new ssh
1196 # unbundle assumes local user cannot lock remote repo (new ssh
1195 # servers, http servers).
1197 # servers, http servers).
1196
1198
1197 if remote.capable('unbundle'):
1199 if remote.capable('unbundle'):
1198 return self.push_unbundle(remote, force, revs)
1200 return self.push_unbundle(remote, force, revs)
1199 return self.push_addchangegroup(remote, force, revs)
1201 return self.push_addchangegroup(remote, force, revs)
1200
1202
1201 def prepush(self, remote, force, revs):
1203 def prepush(self, remote, force, revs):
1202 base = {}
1204 base = {}
1203 remote_heads = remote.heads()
1205 remote_heads = remote.heads()
1204 inc = self.findincoming(remote, base, remote_heads, force=force)
1206 inc = self.findincoming(remote, base, remote_heads, force=force)
1205 if not force and inc:
1207 if not force and inc:
1206 self.ui.warn(_("abort: unsynced remote changes!\n"))
1208 self.ui.warn(_("abort: unsynced remote changes!\n"))
1207 self.ui.status(_("(did you forget to sync?"
1209 self.ui.status(_("(did you forget to sync?"
1208 " use push -f to force)\n"))
1210 " use push -f to force)\n"))
1209 return None, 1
1211 return None, 1
1210
1212
1211 update, updated_heads = self.findoutgoing(remote, base, remote_heads)
1213 update, updated_heads = self.findoutgoing(remote, base, remote_heads)
1212 if revs is not None:
1214 if revs is not None:
1213 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1215 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1214 else:
1216 else:
1215 bases, heads = update, self.changelog.heads()
1217 bases, heads = update, self.changelog.heads()
1216
1218
1217 if not bases:
1219 if not bases:
1218 self.ui.status(_("no changes found\n"))
1220 self.ui.status(_("no changes found\n"))
1219 return None, 1
1221 return None, 1
1220 elif not force:
1222 elif not force:
1221 # FIXME we don't properly detect creation of new heads
1223 # FIXME we don't properly detect creation of new heads
1222 # in the push -r case, assume the user knows what he's doing
1224 # in the push -r case, assume the user knows what he's doing
1223 if not revs and len(remote_heads) < len(heads) \
1225 if not revs and len(remote_heads) < len(heads) \
1224 and remote_heads != [nullid]:
1226 and remote_heads != [nullid]:
1225 self.ui.warn(_("abort: push creates new remote branches!\n"))
1227 self.ui.warn(_("abort: push creates new remote branches!\n"))
1226 self.ui.status(_("(did you forget to merge?"
1228 self.ui.status(_("(did you forget to merge?"
1227 " use push -f to force)\n"))
1229 " use push -f to force)\n"))
1228 return None, 1
1230 return None, 1
1229
1231
1230 if revs is None:
1232 if revs is None:
1231 cg = self.changegroup(update, 'push')
1233 cg = self.changegroup(update, 'push')
1232 else:
1234 else:
1233 cg = self.changegroupsubset(update, revs, 'push')
1235 cg = self.changegroupsubset(update, revs, 'push')
1234 return cg, remote_heads
1236 return cg, remote_heads
1235
1237
1236 def push_addchangegroup(self, remote, force, revs):
1238 def push_addchangegroup(self, remote, force, revs):
1237 lock = remote.lock()
1239 lock = remote.lock()
1238
1240
1239 ret = self.prepush(remote, force, revs)
1241 ret = self.prepush(remote, force, revs)
1240 if ret[0] is not None:
1242 if ret[0] is not None:
1241 cg, remote_heads = ret
1243 cg, remote_heads = ret
1242 return remote.addchangegroup(cg, 'push', self.url())
1244 return remote.addchangegroup(cg, 'push', self.url())
1243 return ret[1]
1245 return ret[1]
1244
1246
1245 def push_unbundle(self, remote, force, revs):
1247 def push_unbundle(self, remote, force, revs):
1246 # local repo finds heads on server, finds out what revs it
1248 # local repo finds heads on server, finds out what revs it
1247 # must push. once revs transferred, if server finds it has
1249 # must push. once revs transferred, if server finds it has
1248 # different heads (someone else won commit/push race), server
1250 # different heads (someone else won commit/push race), server
1249 # aborts.
1251 # aborts.
1250
1252
1251 ret = self.prepush(remote, force, revs)
1253 ret = self.prepush(remote, force, revs)
1252 if ret[0] is not None:
1254 if ret[0] is not None:
1253 cg, remote_heads = ret
1255 cg, remote_heads = ret
1254 if force: remote_heads = ['force']
1256 if force: remote_heads = ['force']
1255 return remote.unbundle(cg, remote_heads, 'push')
1257 return remote.unbundle(cg, remote_heads, 'push')
1256 return ret[1]
1258 return ret[1]
1257
1259
1258 def changegroupsubset(self, bases, heads, source):
1260 def changegroupsubset(self, bases, heads, source):
1259 """This function generates a changegroup consisting of all the nodes
1261 """This function generates a changegroup consisting of all the nodes
1260 that are descendents of any of the bases, and ancestors of any of
1262 that are descendents of any of the bases, and ancestors of any of
1261 the heads.
1263 the heads.
1262
1264
1263 It is fairly complex as determining which filenodes and which
1265 It is fairly complex as determining which filenodes and which
1264 manifest nodes need to be included for the changeset to be complete
1266 manifest nodes need to be included for the changeset to be complete
1265 is non-trivial.
1267 is non-trivial.
1266
1268
1267 Another wrinkle is doing the reverse, figuring out which changeset in
1269 Another wrinkle is doing the reverse, figuring out which changeset in
1268 the changegroup a particular filenode or manifestnode belongs to."""
1270 the changegroup a particular filenode or manifestnode belongs to."""
1269
1271
1270 self.hook('preoutgoing', throw=True, source=source)
1272 self.hook('preoutgoing', throw=True, source=source)
1271
1273
1272 # Set up some initial variables
1274 # Set up some initial variables
1273 # Make it easy to refer to self.changelog
1275 # Make it easy to refer to self.changelog
1274 cl = self.changelog
1276 cl = self.changelog
1275 # msng is short for missing - compute the list of changesets in this
1277 # msng is short for missing - compute the list of changesets in this
1276 # changegroup.
1278 # changegroup.
1277 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1279 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1278 # Some bases may turn out to be superfluous, and some heads may be
1280 # Some bases may turn out to be superfluous, and some heads may be
1279 # too. nodesbetween will return the minimal set of bases and heads
1281 # too. nodesbetween will return the minimal set of bases and heads
1280 # necessary to re-create the changegroup.
1282 # necessary to re-create the changegroup.
1281
1283
1282 # Known heads are the list of heads that it is assumed the recipient
1284 # Known heads are the list of heads that it is assumed the recipient
1283 # of this changegroup will know about.
1285 # of this changegroup will know about.
1284 knownheads = {}
1286 knownheads = {}
1285 # We assume that all parents of bases are known heads.
1287 # We assume that all parents of bases are known heads.
1286 for n in bases:
1288 for n in bases:
1287 for p in cl.parents(n):
1289 for p in cl.parents(n):
1288 if p != nullid:
1290 if p != nullid:
1289 knownheads[p] = 1
1291 knownheads[p] = 1
1290 knownheads = knownheads.keys()
1292 knownheads = knownheads.keys()
1291 if knownheads:
1293 if knownheads:
1292 # Now that we know what heads are known, we can compute which
1294 # Now that we know what heads are known, we can compute which
1293 # changesets are known. The recipient must know about all
1295 # changesets are known. The recipient must know about all
1294 # changesets required to reach the known heads from the null
1296 # changesets required to reach the known heads from the null
1295 # changeset.
1297 # changeset.
1296 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1298 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1297 junk = None
1299 junk = None
1298 # Transform the list into an ersatz set.
1300 # Transform the list into an ersatz set.
1299 has_cl_set = dict.fromkeys(has_cl_set)
1301 has_cl_set = dict.fromkeys(has_cl_set)
1300 else:
1302 else:
1301 # If there were no known heads, the recipient cannot be assumed to
1303 # If there were no known heads, the recipient cannot be assumed to
1302 # know about any changesets.
1304 # know about any changesets.
1303 has_cl_set = {}
1305 has_cl_set = {}
1304
1306
1305 # Make it easy to refer to self.manifest
1307 # Make it easy to refer to self.manifest
1306 mnfst = self.manifest
1308 mnfst = self.manifest
1307 # We don't know which manifests are missing yet
1309 # We don't know which manifests are missing yet
1308 msng_mnfst_set = {}
1310 msng_mnfst_set = {}
1309 # Nor do we know which filenodes are missing.
1311 # Nor do we know which filenodes are missing.
1310 msng_filenode_set = {}
1312 msng_filenode_set = {}
1311
1313
1312 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1314 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1313 junk = None
1315 junk = None
1314
1316
1315 # A changeset always belongs to itself, so the changenode lookup
1317 # A changeset always belongs to itself, so the changenode lookup
1316 # function for a changenode is identity.
1318 # function for a changenode is identity.
1317 def identity(x):
1319 def identity(x):
1318 return x
1320 return x
1319
1321
1320 # A function generating function. Sets up an environment for the
1322 # A function generating function. Sets up an environment for the
1321 # inner function.
1323 # inner function.
1322 def cmp_by_rev_func(revlog):
1324 def cmp_by_rev_func(revlog):
1323 # Compare two nodes by their revision number in the environment's
1325 # Compare two nodes by their revision number in the environment's
1324 # revision history. Since the revision number both represents the
1326 # revision history. Since the revision number both represents the
1325 # most efficient order to read the nodes in, and represents a
1327 # most efficient order to read the nodes in, and represents a
1326 # topological sorting of the nodes, this function is often useful.
1328 # topological sorting of the nodes, this function is often useful.
1327 def cmp_by_rev(a, b):
1329 def cmp_by_rev(a, b):
1328 return cmp(revlog.rev(a), revlog.rev(b))
1330 return cmp(revlog.rev(a), revlog.rev(b))
1329 return cmp_by_rev
1331 return cmp_by_rev
1330
1332
1331 # If we determine that a particular file or manifest node must be a
1333 # If we determine that a particular file or manifest node must be a
1332 # node that the recipient of the changegroup will already have, we can
1334 # node that the recipient of the changegroup will already have, we can
1333 # also assume the recipient will have all the parents. This function
1335 # also assume the recipient will have all the parents. This function
1334 # prunes them from the set of missing nodes.
1336 # prunes them from the set of missing nodes.
1335 def prune_parents(revlog, hasset, msngset):
1337 def prune_parents(revlog, hasset, msngset):
1336 haslst = hasset.keys()
1338 haslst = hasset.keys()
1337 haslst.sort(cmp_by_rev_func(revlog))
1339 haslst.sort(cmp_by_rev_func(revlog))
1338 for node in haslst:
1340 for node in haslst:
1339 parentlst = [p for p in revlog.parents(node) if p != nullid]
1341 parentlst = [p for p in revlog.parents(node) if p != nullid]
1340 while parentlst:
1342 while parentlst:
1341 n = parentlst.pop()
1343 n = parentlst.pop()
1342 if n not in hasset:
1344 if n not in hasset:
1343 hasset[n] = 1
1345 hasset[n] = 1
1344 p = [p for p in revlog.parents(n) if p != nullid]
1346 p = [p for p in revlog.parents(n) if p != nullid]
1345 parentlst.extend(p)
1347 parentlst.extend(p)
1346 for n in hasset:
1348 for n in hasset:
1347 msngset.pop(n, None)
1349 msngset.pop(n, None)
1348
1350
1349 # This is a function generating function used to set up an environment
1351 # This is a function generating function used to set up an environment
1350 # for the inner function to execute in.
1352 # for the inner function to execute in.
1351 def manifest_and_file_collector(changedfileset):
1353 def manifest_and_file_collector(changedfileset):
1352 # This is an information gathering function that gathers
1354 # This is an information gathering function that gathers
1353 # information from each changeset node that goes out as part of
1355 # information from each changeset node that goes out as part of
1354 # the changegroup. The information gathered is a list of which
1356 # the changegroup. The information gathered is a list of which
1355 # manifest nodes are potentially required (the recipient may
1357 # manifest nodes are potentially required (the recipient may
1356 # already have them) and total list of all files which were
1358 # already have them) and total list of all files which were
1357 # changed in any changeset in the changegroup.
1359 # changed in any changeset in the changegroup.
1358 #
1360 #
1359 # We also remember the first changenode we saw any manifest
1361 # We also remember the first changenode we saw any manifest
1360 # referenced by so we can later determine which changenode 'owns'
1362 # referenced by so we can later determine which changenode 'owns'
1361 # the manifest.
1363 # the manifest.
1362 def collect_manifests_and_files(clnode):
1364 def collect_manifests_and_files(clnode):
1363 c = cl.read(clnode)
1365 c = cl.read(clnode)
1364 for f in c[3]:
1366 for f in c[3]:
1365 # This is to make sure we only have one instance of each
1367 # This is to make sure we only have one instance of each
1366 # filename string for each filename.
1368 # filename string for each filename.
1367 changedfileset.setdefault(f, f)
1369 changedfileset.setdefault(f, f)
1368 msng_mnfst_set.setdefault(c[0], clnode)
1370 msng_mnfst_set.setdefault(c[0], clnode)
1369 return collect_manifests_and_files
1371 return collect_manifests_and_files
1370
1372
1371 # Figure out which manifest nodes (of the ones we think might be part
1373 # Figure out which manifest nodes (of the ones we think might be part
1372 # of the changegroup) the recipient must know about and remove them
1374 # of the changegroup) the recipient must know about and remove them
1373 # from the changegroup.
1375 # from the changegroup.
1374 def prune_manifests():
1376 def prune_manifests():
1375 has_mnfst_set = {}
1377 has_mnfst_set = {}
1376 for n in msng_mnfst_set:
1378 for n in msng_mnfst_set:
1377 # If a 'missing' manifest thinks it belongs to a changenode
1379 # If a 'missing' manifest thinks it belongs to a changenode
1378 # the recipient is assumed to have, obviously the recipient
1380 # the recipient is assumed to have, obviously the recipient
1379 # must have that manifest.
1381 # must have that manifest.
1380 linknode = cl.node(mnfst.linkrev(n))
1382 linknode = cl.node(mnfst.linkrev(n))
1381 if linknode in has_cl_set:
1383 if linknode in has_cl_set:
1382 has_mnfst_set[n] = 1
1384 has_mnfst_set[n] = 1
1383 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1385 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1384
1386
1385 # Use the information collected in collect_manifests_and_files to say
1387 # Use the information collected in collect_manifests_and_files to say
1386 # which changenode any manifestnode belongs to.
1388 # which changenode any manifestnode belongs to.
1387 def lookup_manifest_link(mnfstnode):
1389 def lookup_manifest_link(mnfstnode):
1388 return msng_mnfst_set[mnfstnode]
1390 return msng_mnfst_set[mnfstnode]
1389
1391
1390 # A function generating function that sets up the initial environment
1392 # A function generating function that sets up the initial environment
1391 # the inner function.
1393 # the inner function.
1392 def filenode_collector(changedfiles):
1394 def filenode_collector(changedfiles):
1393 next_rev = [0]
1395 next_rev = [0]
1394 # This gathers information from each manifestnode included in the
1396 # This gathers information from each manifestnode included in the
1395 # changegroup about which filenodes the manifest node references
1397 # changegroup about which filenodes the manifest node references
1396 # so we can include those in the changegroup too.
1398 # so we can include those in the changegroup too.
1397 #
1399 #
1398 # It also remembers which changenode each filenode belongs to. It
1400 # It also remembers which changenode each filenode belongs to. It
1399 # does this by assuming the a filenode belongs to the changenode
1401 # does this by assuming the a filenode belongs to the changenode
1400 # the first manifest that references it belongs to.
1402 # the first manifest that references it belongs to.
1401 def collect_msng_filenodes(mnfstnode):
1403 def collect_msng_filenodes(mnfstnode):
1402 r = mnfst.rev(mnfstnode)
1404 r = mnfst.rev(mnfstnode)
1403 if r == next_rev[0]:
1405 if r == next_rev[0]:
1404 # If the last rev we looked at was the one just previous,
1406 # If the last rev we looked at was the one just previous,
1405 # we only need to see a diff.
1407 # we only need to see a diff.
1406 delta = mdiff.patchtext(mnfst.delta(mnfstnode))
1408 delta = mdiff.patchtext(mnfst.delta(mnfstnode))
1407 # For each line in the delta
1409 # For each line in the delta
1408 for dline in delta.splitlines():
1410 for dline in delta.splitlines():
1409 # get the filename and filenode for that line
1411 # get the filename and filenode for that line
1410 f, fnode = dline.split('\0')
1412 f, fnode = dline.split('\0')
1411 fnode = bin(fnode[:40])
1413 fnode = bin(fnode[:40])
1412 f = changedfiles.get(f, None)
1414 f = changedfiles.get(f, None)
1413 # And if the file is in the list of files we care
1415 # And if the file is in the list of files we care
1414 # about.
1416 # about.
1415 if f is not None:
1417 if f is not None:
1416 # Get the changenode this manifest belongs to
1418 # Get the changenode this manifest belongs to
1417 clnode = msng_mnfst_set[mnfstnode]
1419 clnode = msng_mnfst_set[mnfstnode]
1418 # Create the set of filenodes for the file if
1420 # Create the set of filenodes for the file if
1419 # there isn't one already.
1421 # there isn't one already.
1420 ndset = msng_filenode_set.setdefault(f, {})
1422 ndset = msng_filenode_set.setdefault(f, {})
1421 # And set the filenode's changelog node to the
1423 # And set the filenode's changelog node to the
1422 # manifest's if it hasn't been set already.
1424 # manifest's if it hasn't been set already.
1423 ndset.setdefault(fnode, clnode)
1425 ndset.setdefault(fnode, clnode)
1424 else:
1426 else:
1425 # Otherwise we need a full manifest.
1427 # Otherwise we need a full manifest.
1426 m = mnfst.read(mnfstnode)
1428 m = mnfst.read(mnfstnode)
1427 # For every file in we care about.
1429 # For every file in we care about.
1428 for f in changedfiles:
1430 for f in changedfiles:
1429 fnode = m.get(f, None)
1431 fnode = m.get(f, None)
1430 # If it's in the manifest
1432 # If it's in the manifest
1431 if fnode is not None:
1433 if fnode is not None:
1432 # See comments above.
1434 # See comments above.
1433 clnode = msng_mnfst_set[mnfstnode]
1435 clnode = msng_mnfst_set[mnfstnode]
1434 ndset = msng_filenode_set.setdefault(f, {})
1436 ndset = msng_filenode_set.setdefault(f, {})
1435 ndset.setdefault(fnode, clnode)
1437 ndset.setdefault(fnode, clnode)
1436 # Remember the revision we hope to see next.
1438 # Remember the revision we hope to see next.
1437 next_rev[0] = r + 1
1439 next_rev[0] = r + 1
1438 return collect_msng_filenodes
1440 return collect_msng_filenodes
1439
1441
1440 # We have a list of filenodes we think we need for a file, lets remove
1442 # We have a list of filenodes we think we need for a file, lets remove
1441 # all those we now the recipient must have.
1443 # all those we now the recipient must have.
1442 def prune_filenodes(f, filerevlog):
1444 def prune_filenodes(f, filerevlog):
1443 msngset = msng_filenode_set[f]
1445 msngset = msng_filenode_set[f]
1444 hasset = {}
1446 hasset = {}
1445 # If a 'missing' filenode thinks it belongs to a changenode we
1447 # If a 'missing' filenode thinks it belongs to a changenode we
1446 # assume the recipient must have, then the recipient must have
1448 # assume the recipient must have, then the recipient must have
1447 # that filenode.
1449 # that filenode.
1448 for n in msngset:
1450 for n in msngset:
1449 clnode = cl.node(filerevlog.linkrev(n))
1451 clnode = cl.node(filerevlog.linkrev(n))
1450 if clnode in has_cl_set:
1452 if clnode in has_cl_set:
1451 hasset[n] = 1
1453 hasset[n] = 1
1452 prune_parents(filerevlog, hasset, msngset)
1454 prune_parents(filerevlog, hasset, msngset)
1453
1455
1454 # A function generator function that sets up the a context for the
1456 # A function generator function that sets up the a context for the
1455 # inner function.
1457 # inner function.
1456 def lookup_filenode_link_func(fname):
1458 def lookup_filenode_link_func(fname):
1457 msngset = msng_filenode_set[fname]
1459 msngset = msng_filenode_set[fname]
1458 # Lookup the changenode the filenode belongs to.
1460 # Lookup the changenode the filenode belongs to.
1459 def lookup_filenode_link(fnode):
1461 def lookup_filenode_link(fnode):
1460 return msngset[fnode]
1462 return msngset[fnode]
1461 return lookup_filenode_link
1463 return lookup_filenode_link
1462
1464
1463 # Now that we have all theses utility functions to help out and
1465 # Now that we have all theses utility functions to help out and
1464 # logically divide up the task, generate the group.
1466 # logically divide up the task, generate the group.
1465 def gengroup():
1467 def gengroup():
1466 # The set of changed files starts empty.
1468 # The set of changed files starts empty.
1467 changedfiles = {}
1469 changedfiles = {}
1468 # Create a changenode group generator that will call our functions
1470 # Create a changenode group generator that will call our functions
1469 # back to lookup the owning changenode and collect information.
1471 # back to lookup the owning changenode and collect information.
1470 group = cl.group(msng_cl_lst, identity,
1472 group = cl.group(msng_cl_lst, identity,
1471 manifest_and_file_collector(changedfiles))
1473 manifest_and_file_collector(changedfiles))
1472 for chnk in group:
1474 for chnk in group:
1473 yield chnk
1475 yield chnk
1474
1476
1475 # The list of manifests has been collected by the generator
1477 # The list of manifests has been collected by the generator
1476 # calling our functions back.
1478 # calling our functions back.
1477 prune_manifests()
1479 prune_manifests()
1478 msng_mnfst_lst = msng_mnfst_set.keys()
1480 msng_mnfst_lst = msng_mnfst_set.keys()
1479 # Sort the manifestnodes by revision number.
1481 # Sort the manifestnodes by revision number.
1480 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1482 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1481 # Create a generator for the manifestnodes that calls our lookup
1483 # Create a generator for the manifestnodes that calls our lookup
1482 # and data collection functions back.
1484 # and data collection functions back.
1483 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1485 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1484 filenode_collector(changedfiles))
1486 filenode_collector(changedfiles))
1485 for chnk in group:
1487 for chnk in group:
1486 yield chnk
1488 yield chnk
1487
1489
1488 # These are no longer needed, dereference and toss the memory for
1490 # These are no longer needed, dereference and toss the memory for
1489 # them.
1491 # them.
1490 msng_mnfst_lst = None
1492 msng_mnfst_lst = None
1491 msng_mnfst_set.clear()
1493 msng_mnfst_set.clear()
1492
1494
1493 changedfiles = changedfiles.keys()
1495 changedfiles = changedfiles.keys()
1494 changedfiles.sort()
1496 changedfiles.sort()
1495 # Go through all our files in order sorted by name.
1497 # Go through all our files in order sorted by name.
1496 for fname in changedfiles:
1498 for fname in changedfiles:
1497 filerevlog = self.file(fname)
1499 filerevlog = self.file(fname)
1498 # Toss out the filenodes that the recipient isn't really
1500 # Toss out the filenodes that the recipient isn't really
1499 # missing.
1501 # missing.
1500 if msng_filenode_set.has_key(fname):
1502 if msng_filenode_set.has_key(fname):
1501 prune_filenodes(fname, filerevlog)
1503 prune_filenodes(fname, filerevlog)
1502 msng_filenode_lst = msng_filenode_set[fname].keys()
1504 msng_filenode_lst = msng_filenode_set[fname].keys()
1503 else:
1505 else:
1504 msng_filenode_lst = []
1506 msng_filenode_lst = []
1505 # If any filenodes are left, generate the group for them,
1507 # If any filenodes are left, generate the group for them,
1506 # otherwise don't bother.
1508 # otherwise don't bother.
1507 if len(msng_filenode_lst) > 0:
1509 if len(msng_filenode_lst) > 0:
1508 yield changegroup.genchunk(fname)
1510 yield changegroup.genchunk(fname)
1509 # Sort the filenodes by their revision #
1511 # Sort the filenodes by their revision #
1510 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1512 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1511 # Create a group generator and only pass in a changenode
1513 # Create a group generator and only pass in a changenode
1512 # lookup function as we need to collect no information
1514 # lookup function as we need to collect no information
1513 # from filenodes.
1515 # from filenodes.
1514 group = filerevlog.group(msng_filenode_lst,
1516 group = filerevlog.group(msng_filenode_lst,
1515 lookup_filenode_link_func(fname))
1517 lookup_filenode_link_func(fname))
1516 for chnk in group:
1518 for chnk in group:
1517 yield chnk
1519 yield chnk
1518 if msng_filenode_set.has_key(fname):
1520 if msng_filenode_set.has_key(fname):
1519 # Don't need this anymore, toss it to free memory.
1521 # Don't need this anymore, toss it to free memory.
1520 del msng_filenode_set[fname]
1522 del msng_filenode_set[fname]
1521 # Signal that no more groups are left.
1523 # Signal that no more groups are left.
1522 yield changegroup.closechunk()
1524 yield changegroup.closechunk()
1523
1525
1524 if msng_cl_lst:
1526 if msng_cl_lst:
1525 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1527 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1526
1528
1527 return util.chunkbuffer(gengroup())
1529 return util.chunkbuffer(gengroup())
1528
1530
1529 def changegroup(self, basenodes, source):
1531 def changegroup(self, basenodes, source):
1530 """Generate a changegroup of all nodes that we have that a recipient
1532 """Generate a changegroup of all nodes that we have that a recipient
1531 doesn't.
1533 doesn't.
1532
1534
1533 This is much easier than the previous function as we can assume that
1535 This is much easier than the previous function as we can assume that
1534 the recipient has any changenode we aren't sending them."""
1536 the recipient has any changenode we aren't sending them."""
1535
1537
1536 self.hook('preoutgoing', throw=True, source=source)
1538 self.hook('preoutgoing', throw=True, source=source)
1537
1539
1538 cl = self.changelog
1540 cl = self.changelog
1539 nodes = cl.nodesbetween(basenodes, None)[0]
1541 nodes = cl.nodesbetween(basenodes, None)[0]
1540 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1542 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1541
1543
1542 def identity(x):
1544 def identity(x):
1543 return x
1545 return x
1544
1546
1545 def gennodelst(revlog):
1547 def gennodelst(revlog):
1546 for r in xrange(0, revlog.count()):
1548 for r in xrange(0, revlog.count()):
1547 n = revlog.node(r)
1549 n = revlog.node(r)
1548 if revlog.linkrev(n) in revset:
1550 if revlog.linkrev(n) in revset:
1549 yield n
1551 yield n
1550
1552
1551 def changed_file_collector(changedfileset):
1553 def changed_file_collector(changedfileset):
1552 def collect_changed_files(clnode):
1554 def collect_changed_files(clnode):
1553 c = cl.read(clnode)
1555 c = cl.read(clnode)
1554 for fname in c[3]:
1556 for fname in c[3]:
1555 changedfileset[fname] = 1
1557 changedfileset[fname] = 1
1556 return collect_changed_files
1558 return collect_changed_files
1557
1559
1558 def lookuprevlink_func(revlog):
1560 def lookuprevlink_func(revlog):
1559 def lookuprevlink(n):
1561 def lookuprevlink(n):
1560 return cl.node(revlog.linkrev(n))
1562 return cl.node(revlog.linkrev(n))
1561 return lookuprevlink
1563 return lookuprevlink
1562
1564
1563 def gengroup():
1565 def gengroup():
1564 # construct a list of all changed files
1566 # construct a list of all changed files
1565 changedfiles = {}
1567 changedfiles = {}
1566
1568
1567 for chnk in cl.group(nodes, identity,
1569 for chnk in cl.group(nodes, identity,
1568 changed_file_collector(changedfiles)):
1570 changed_file_collector(changedfiles)):
1569 yield chnk
1571 yield chnk
1570 changedfiles = changedfiles.keys()
1572 changedfiles = changedfiles.keys()
1571 changedfiles.sort()
1573 changedfiles.sort()
1572
1574
1573 mnfst = self.manifest
1575 mnfst = self.manifest
1574 nodeiter = gennodelst(mnfst)
1576 nodeiter = gennodelst(mnfst)
1575 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1577 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1576 yield chnk
1578 yield chnk
1577
1579
1578 for fname in changedfiles:
1580 for fname in changedfiles:
1579 filerevlog = self.file(fname)
1581 filerevlog = self.file(fname)
1580 nodeiter = gennodelst(filerevlog)
1582 nodeiter = gennodelst(filerevlog)
1581 nodeiter = list(nodeiter)
1583 nodeiter = list(nodeiter)
1582 if nodeiter:
1584 if nodeiter:
1583 yield changegroup.genchunk(fname)
1585 yield changegroup.genchunk(fname)
1584 lookup = lookuprevlink_func(filerevlog)
1586 lookup = lookuprevlink_func(filerevlog)
1585 for chnk in filerevlog.group(nodeiter, lookup):
1587 for chnk in filerevlog.group(nodeiter, lookup):
1586 yield chnk
1588 yield chnk
1587
1589
1588 yield changegroup.closechunk()
1590 yield changegroup.closechunk()
1589
1591
1590 if nodes:
1592 if nodes:
1591 self.hook('outgoing', node=hex(nodes[0]), source=source)
1593 self.hook('outgoing', node=hex(nodes[0]), source=source)
1592
1594
1593 return util.chunkbuffer(gengroup())
1595 return util.chunkbuffer(gengroup())
1594
1596
1595 def addchangegroup(self, source, srctype, url):
1597 def addchangegroup(self, source, srctype, url):
1596 """add changegroup to repo.
1598 """add changegroup to repo.
1597 returns number of heads modified or added + 1."""
1599 returns number of heads modified or added + 1."""
1598
1600
1599 def csmap(x):
1601 def csmap(x):
1600 self.ui.debug(_("add changeset %s\n") % short(x))
1602 self.ui.debug(_("add changeset %s\n") % short(x))
1601 return cl.count()
1603 return cl.count()
1602
1604
1603 def revmap(x):
1605 def revmap(x):
1604 return cl.rev(x)
1606 return cl.rev(x)
1605
1607
1606 if not source:
1608 if not source:
1607 return 0
1609 return 0
1608
1610
1609 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1611 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1610
1612
1611 changesets = files = revisions = 0
1613 changesets = files = revisions = 0
1612
1614
1613 tr = self.transaction()
1615 tr = self.transaction()
1614
1616
1615 # write changelog data to temp files so concurrent readers will not see
1617 # write changelog data to temp files so concurrent readers will not see
1616 # inconsistent view
1618 # inconsistent view
1617 cl = None
1619 cl = None
1618 try:
1620 try:
1619 cl = appendfile.appendchangelog(self.opener, self.changelog.version)
1621 cl = appendfile.appendchangelog(self.opener, self.changelog.version)
1620
1622
1621 oldheads = len(cl.heads())
1623 oldheads = len(cl.heads())
1622
1624
1623 # pull off the changeset group
1625 # pull off the changeset group
1624 self.ui.status(_("adding changesets\n"))
1626 self.ui.status(_("adding changesets\n"))
1625 cor = cl.count() - 1
1627 cor = cl.count() - 1
1626 chunkiter = changegroup.chunkiter(source)
1628 chunkiter = changegroup.chunkiter(source)
1627 if cl.addgroup(chunkiter, csmap, tr, 1) is None:
1629 if cl.addgroup(chunkiter, csmap, tr, 1) is None:
1628 raise util.Abort(_("received changelog group is empty"))
1630 raise util.Abort(_("received changelog group is empty"))
1629 cnr = cl.count() - 1
1631 cnr = cl.count() - 1
1630 changesets = cnr - cor
1632 changesets = cnr - cor
1631
1633
1632 # pull off the manifest group
1634 # pull off the manifest group
1633 self.ui.status(_("adding manifests\n"))
1635 self.ui.status(_("adding manifests\n"))
1634 chunkiter = changegroup.chunkiter(source)
1636 chunkiter = changegroup.chunkiter(source)
1635 # no need to check for empty manifest group here:
1637 # no need to check for empty manifest group here:
1636 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1638 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1637 # no new manifest will be created and the manifest group will
1639 # no new manifest will be created and the manifest group will
1638 # be empty during the pull
1640 # be empty during the pull
1639 self.manifest.addgroup(chunkiter, revmap, tr)
1641 self.manifest.addgroup(chunkiter, revmap, tr)
1640
1642
1641 # process the files
1643 # process the files
1642 self.ui.status(_("adding file changes\n"))
1644 self.ui.status(_("adding file changes\n"))
1643 while 1:
1645 while 1:
1644 f = changegroup.getchunk(source)
1646 f = changegroup.getchunk(source)
1645 if not f:
1647 if not f:
1646 break
1648 break
1647 self.ui.debug(_("adding %s revisions\n") % f)
1649 self.ui.debug(_("adding %s revisions\n") % f)
1648 fl = self.file(f)
1650 fl = self.file(f)
1649 o = fl.count()
1651 o = fl.count()
1650 chunkiter = changegroup.chunkiter(source)
1652 chunkiter = changegroup.chunkiter(source)
1651 if fl.addgroup(chunkiter, revmap, tr) is None:
1653 if fl.addgroup(chunkiter, revmap, tr) is None:
1652 raise util.Abort(_("received file revlog group is empty"))
1654 raise util.Abort(_("received file revlog group is empty"))
1653 revisions += fl.count() - o
1655 revisions += fl.count() - o
1654 files += 1
1656 files += 1
1655
1657
1656 cl.writedata()
1658 cl.writedata()
1657 finally:
1659 finally:
1658 if cl:
1660 if cl:
1659 cl.cleanup()
1661 cl.cleanup()
1660
1662
1661 # make changelog see real files again
1663 # make changelog see real files again
1662 self.changelog = changelog.changelog(self.opener, self.changelog.version)
1664 self.changelog = changelog.changelog(self.opener, self.changelog.version)
1663 self.changelog.checkinlinesize(tr)
1665 self.changelog.checkinlinesize(tr)
1664
1666
1665 newheads = len(self.changelog.heads())
1667 newheads = len(self.changelog.heads())
1666 heads = ""
1668 heads = ""
1667 if oldheads and newheads != oldheads:
1669 if oldheads and newheads != oldheads:
1668 heads = _(" (%+d heads)") % (newheads - oldheads)
1670 heads = _(" (%+d heads)") % (newheads - oldheads)
1669
1671
1670 self.ui.status(_("added %d changesets"
1672 self.ui.status(_("added %d changesets"
1671 " with %d changes to %d files%s\n")
1673 " with %d changes to %d files%s\n")
1672 % (changesets, revisions, files, heads))
1674 % (changesets, revisions, files, heads))
1673
1675
1674 if changesets > 0:
1676 if changesets > 0:
1675 self.hook('pretxnchangegroup', throw=True,
1677 self.hook('pretxnchangegroup', throw=True,
1676 node=hex(self.changelog.node(cor+1)), source=srctype,
1678 node=hex(self.changelog.node(cor+1)), source=srctype,
1677 url=url)
1679 url=url)
1678
1680
1679 tr.close()
1681 tr.close()
1680
1682
1681 if changesets > 0:
1683 if changesets > 0:
1682 self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
1684 self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
1683 source=srctype, url=url)
1685 source=srctype, url=url)
1684
1686
1685 for i in range(cor + 1, cnr + 1):
1687 for i in range(cor + 1, cnr + 1):
1686 self.hook("incoming", node=hex(self.changelog.node(i)),
1688 self.hook("incoming", node=hex(self.changelog.node(i)),
1687 source=srctype, url=url)
1689 source=srctype, url=url)
1688
1690
1689 return newheads - oldheads + 1
1691 return newheads - oldheads + 1
1690
1692
1691
1693
1692 def stream_in(self, remote):
1694 def stream_in(self, remote):
1693 fp = remote.stream_out()
1695 fp = remote.stream_out()
1694 resp = int(fp.readline())
1696 resp = int(fp.readline())
1695 if resp != 0:
1697 if resp != 0:
1696 raise util.Abort(_('operation forbidden by server'))
1698 raise util.Abort(_('operation forbidden by server'))
1697 self.ui.status(_('streaming all changes\n'))
1699 self.ui.status(_('streaming all changes\n'))
1698 total_files, total_bytes = map(int, fp.readline().split(' ', 1))
1700 total_files, total_bytes = map(int, fp.readline().split(' ', 1))
1699 self.ui.status(_('%d files to transfer, %s of data\n') %
1701 self.ui.status(_('%d files to transfer, %s of data\n') %
1700 (total_files, util.bytecount(total_bytes)))
1702 (total_files, util.bytecount(total_bytes)))
1701 start = time.time()
1703 start = time.time()
1702 for i in xrange(total_files):
1704 for i in xrange(total_files):
1703 name, size = fp.readline().split('\0', 1)
1705 name, size = fp.readline().split('\0', 1)
1704 size = int(size)
1706 size = int(size)
1705 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1707 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1706 ofp = self.opener(name, 'w')
1708 ofp = self.opener(name, 'w')
1707 for chunk in util.filechunkiter(fp, limit=size):
1709 for chunk in util.filechunkiter(fp, limit=size):
1708 ofp.write(chunk)
1710 ofp.write(chunk)
1709 ofp.close()
1711 ofp.close()
1710 elapsed = time.time() - start
1712 elapsed = time.time() - start
1711 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1713 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1712 (util.bytecount(total_bytes), elapsed,
1714 (util.bytecount(total_bytes), elapsed,
1713 util.bytecount(total_bytes / elapsed)))
1715 util.bytecount(total_bytes / elapsed)))
1714 self.reload()
1716 self.reload()
1715 return len(self.heads()) + 1
1717 return len(self.heads()) + 1
1716
1718
1717 def clone(self, remote, heads=[], stream=False):
1719 def clone(self, remote, heads=[], stream=False):
1718 '''clone remote repository.
1720 '''clone remote repository.
1719
1721
1720 keyword arguments:
1722 keyword arguments:
1721 heads: list of revs to clone (forces use of pull)
1723 heads: list of revs to clone (forces use of pull)
1722 stream: use streaming clone if possible'''
1724 stream: use streaming clone if possible'''
1723
1725
1724 # now, all clients that can request uncompressed clones can
1726 # now, all clients that can request uncompressed clones can
1725 # read repo formats supported by all servers that can serve
1727 # read repo formats supported by all servers that can serve
1726 # them.
1728 # them.
1727
1729
1728 # if revlog format changes, client will have to check version
1730 # if revlog format changes, client will have to check version
1729 # and format flags on "stream" capability, and use
1731 # and format flags on "stream" capability, and use
1730 # uncompressed only if compatible.
1732 # uncompressed only if compatible.
1731
1733
1732 if stream and not heads and remote.capable('stream'):
1734 if stream and not heads and remote.capable('stream'):
1733 return self.stream_in(remote)
1735 return self.stream_in(remote)
1734 return self.pull(remote, heads)
1736 return self.pull(remote, heads)
1735
1737
1736 # used to avoid circular references so destructors work
1738 # used to avoid circular references so destructors work
1737 def aftertrans(base):
1739 def aftertrans(base):
1738 p = base
1740 p = base
1739 def a():
1741 def a():
1740 util.rename(os.path.join(p, "journal"), os.path.join(p, "undo"))
1742 util.rename(os.path.join(p, "journal"), os.path.join(p, "undo"))
1741 util.rename(os.path.join(p, "journal.dirstate"),
1743 util.rename(os.path.join(p, "journal.dirstate"),
1742 os.path.join(p, "undo.dirstate"))
1744 os.path.join(p, "undo.dirstate"))
1743 return a
1745 return a
1744
1746
1745 def instance(ui, path, create):
1747 def instance(ui, path, create):
1746 return localrepository(ui, util.drop_scheme('file', path), create)
1748 return localrepository(ui, util.drop_scheme('file', path), create)
1747
1749
1748 def islocal(path):
1750 def islocal(path):
1749 return True
1751 return True
@@ -1,208 +1,214 b''
1 # sshrepo.py - ssh repository proxy class for mercurial
1 # sshrepo.py - ssh repository proxy class for mercurial
2 #
2 #
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7
7
8 from node import *
8 from node import *
9 from remoterepo import *
9 from remoterepo import *
10 from i18n import gettext as _
10 from i18n import gettext as _
11 from demandload import *
11 from demandload import *
12 demandload(globals(), "hg os re stat util")
12 demandload(globals(), "hg os re stat util")
13
13
14 class sshrepository(remoterepository):
14 class sshrepository(remoterepository):
15 def __init__(self, ui, path, create=0):
15 def __init__(self, ui, path, create=0):
16 self._url = path
16 self._url = path
17 self.ui = ui
17 self.ui = ui
18
18
19 m = re.match(r'ssh://(([^@]+)@)?([^:/]+)(:(\d+))?(/(.*))?', path)
19 m = re.match(r'ssh://(([^@]+)@)?([^:/]+)(:(\d+))?(/(.*))?', path)
20 if not m:
20 if not m:
21 raise hg.RepoError(_("couldn't parse location %s") % path)
21 raise hg.RepoError(_("couldn't parse location %s") % path)
22
22
23 self.user = m.group(2)
23 self.user = m.group(2)
24 self.host = m.group(3)
24 self.host = m.group(3)
25 self.port = m.group(5)
25 self.port = m.group(5)
26 self.path = m.group(7) or "."
26 self.path = m.group(7) or "."
27
27
28 args = self.user and ("%s@%s" % (self.user, self.host)) or self.host
28 args = self.user and ("%s@%s" % (self.user, self.host)) or self.host
29 args = self.port and ("%s -p %s") % (args, self.port) or args
29 args = self.port and ("%s -p %s") % (args, self.port) or args
30
30
31 sshcmd = self.ui.config("ui", "ssh", "ssh")
31 sshcmd = self.ui.config("ui", "ssh", "ssh")
32 remotecmd = self.ui.config("ui", "remotecmd", "hg")
32 remotecmd = self.ui.config("ui", "remotecmd", "hg")
33
33
34 if create:
34 if create:
35 try:
35 try:
36 self.validate_repo(ui, sshcmd, args, remotecmd)
36 self.validate_repo(ui, sshcmd, args, remotecmd)
37 return # the repo is good, nothing more to do
38 except hg.RepoError:
37 except hg.RepoError:
39 pass
38 pass
39 else:
40 raise hg.RepoError(_("repository %s already exists") % path)
40
41
41 cmd = '%s %s "%s init %s"'
42 cmd = '%s %s "%s init %s"'
42 cmd = cmd % (sshcmd, args, remotecmd, self.path)
43 cmd = cmd % (sshcmd, args, remotecmd, self.path)
43
44
44 ui.note('running %s\n' % cmd)
45 ui.note('running %s\n' % cmd)
45 res = os.system(cmd)
46 res = os.system(cmd)
46 if res != 0:
47 if res != 0:
47 raise hg.RepoError(_("could not create remote repo"))
48 raise hg.RepoError(_("could not create remote repo"))
48
49
49 self.validate_repo(ui, sshcmd, args, remotecmd)
50 self.validate_repo(ui, sshcmd, args, remotecmd)
50
51
51 def url(self):
52 def url(self):
52 return self._url
53 return self._url
53
54
54 def validate_repo(self, ui, sshcmd, args, remotecmd):
55 def validate_repo(self, ui, sshcmd, args, remotecmd):
56 # cleanup up previous run
57 self.cleanup()
58
55 cmd = '%s %s "%s -R %s serve --stdio"'
59 cmd = '%s %s "%s -R %s serve --stdio"'
56 cmd = cmd % (sshcmd, args, remotecmd, self.path)
60 cmd = cmd % (sshcmd, args, remotecmd, self.path)
57
61
58 ui.note('running %s\n' % cmd)
62 ui.note('running %s\n' % cmd)
59 self.pipeo, self.pipei, self.pipee = os.popen3(cmd, 'b')
63 self.pipeo, self.pipei, self.pipee = os.popen3(cmd, 'b')
60
64
61 # skip any noise generated by remote shell
65 # skip any noise generated by remote shell
62 self.do_cmd("hello")
66 self.do_cmd("hello")
63 r = self.do_cmd("between", pairs=("%s-%s" % ("0"*40, "0"*40)))
67 r = self.do_cmd("between", pairs=("%s-%s" % ("0"*40, "0"*40)))
64 lines = ["", "dummy"]
68 lines = ["", "dummy"]
65 max_noise = 500
69 max_noise = 500
66 while lines[-1] and max_noise:
70 while lines[-1] and max_noise:
67 l = r.readline()
71 l = r.readline()
68 self.readerr()
72 self.readerr()
69 if lines[-1] == "1\n" and l == "\n":
73 if lines[-1] == "1\n" and l == "\n":
70 break
74 break
71 if l:
75 if l:
72 ui.debug(_("remote: "), l)
76 ui.debug(_("remote: "), l)
73 lines.append(l)
77 lines.append(l)
74 max_noise -= 1
78 max_noise -= 1
75 else:
79 else:
76 raise hg.RepoError(_("no response from remote hg"))
80 raise hg.RepoError(_("no response from remote hg"))
77
81
78 self.capabilities = ()
82 self.capabilities = ()
79 lines.reverse()
83 lines.reverse()
80 for l in lines:
84 for l in lines:
81 if l.startswith("capabilities:"):
85 if l.startswith("capabilities:"):
82 self.capabilities = l[:-1].split(":")[1].split()
86 self.capabilities = l[:-1].split(":")[1].split()
83 break
87 break
84
88
85 def readerr(self):
89 def readerr(self):
86 while 1:
90 while 1:
87 size = util.fstat(self.pipee).st_size
91 size = util.fstat(self.pipee).st_size
88 if size == 0: break
92 if size == 0: break
89 l = self.pipee.readline()
93 l = self.pipee.readline()
90 if not l: break
94 if not l: break
91 self.ui.status(_("remote: "), l)
95 self.ui.status(_("remote: "), l)
92
96
93 def __del__(self):
97 def cleanup(self):
94 try:
98 try:
95 self.pipeo.close()
99 self.pipeo.close()
96 self.pipei.close()
100 self.pipei.close()
97 # read the error descriptor until EOF
101 # read the error descriptor until EOF
98 for l in self.pipee:
102 for l in self.pipee:
99 self.ui.status(_("remote: "), l)
103 self.ui.status(_("remote: "), l)
100 self.pipee.close()
104 self.pipee.close()
101 except:
105 except:
102 pass
106 pass
103
107
108 __del__ = cleanup
109
104 def do_cmd(self, cmd, **args):
110 def do_cmd(self, cmd, **args):
105 self.ui.debug(_("sending %s command\n") % cmd)
111 self.ui.debug(_("sending %s command\n") % cmd)
106 self.pipeo.write("%s\n" % cmd)
112 self.pipeo.write("%s\n" % cmd)
107 for k, v in args.items():
113 for k, v in args.items():
108 self.pipeo.write("%s %d\n" % (k, len(v)))
114 self.pipeo.write("%s %d\n" % (k, len(v)))
109 self.pipeo.write(v)
115 self.pipeo.write(v)
110 self.pipeo.flush()
116 self.pipeo.flush()
111
117
112 return self.pipei
118 return self.pipei
113
119
114 def call(self, cmd, **args):
120 def call(self, cmd, **args):
115 r = self.do_cmd(cmd, **args)
121 r = self.do_cmd(cmd, **args)
116 l = r.readline()
122 l = r.readline()
117 self.readerr()
123 self.readerr()
118 try:
124 try:
119 l = int(l)
125 l = int(l)
120 except:
126 except:
121 raise hg.RepoError(_("unexpected response '%s'") % l)
127 raise hg.RepoError(_("unexpected response '%s'") % l)
122 return r.read(l)
128 return r.read(l)
123
129
124 def lock(self):
130 def lock(self):
125 self.call("lock")
131 self.call("lock")
126 return remotelock(self)
132 return remotelock(self)
127
133
128 def unlock(self):
134 def unlock(self):
129 self.call("unlock")
135 self.call("unlock")
130
136
131 def heads(self):
137 def heads(self):
132 d = self.call("heads")
138 d = self.call("heads")
133 try:
139 try:
134 return map(bin, d[:-1].split(" "))
140 return map(bin, d[:-1].split(" "))
135 except:
141 except:
136 raise hg.RepoError(_("unexpected response '%s'") % (d[:400] + "..."))
142 raise hg.RepoError(_("unexpected response '%s'") % (d[:400] + "..."))
137
143
138 def branches(self, nodes):
144 def branches(self, nodes):
139 n = " ".join(map(hex, nodes))
145 n = " ".join(map(hex, nodes))
140 d = self.call("branches", nodes=n)
146 d = self.call("branches", nodes=n)
141 try:
147 try:
142 br = [ tuple(map(bin, b.split(" "))) for b in d.splitlines() ]
148 br = [ tuple(map(bin, b.split(" "))) for b in d.splitlines() ]
143 return br
149 return br
144 except:
150 except:
145 raise hg.RepoError(_("unexpected response '%s'") % (d[:400] + "..."))
151 raise hg.RepoError(_("unexpected response '%s'") % (d[:400] + "..."))
146
152
147 def between(self, pairs):
153 def between(self, pairs):
148 n = "\n".join(["-".join(map(hex, p)) for p in pairs])
154 n = "\n".join(["-".join(map(hex, p)) for p in pairs])
149 d = self.call("between", pairs=n)
155 d = self.call("between", pairs=n)
150 try:
156 try:
151 p = [ l and map(bin, l.split(" ")) or [] for l in d.splitlines() ]
157 p = [ l and map(bin, l.split(" ")) or [] for l in d.splitlines() ]
152 return p
158 return p
153 except:
159 except:
154 raise hg.RepoError(_("unexpected response '%s'") % (d[:400] + "..."))
160 raise hg.RepoError(_("unexpected response '%s'") % (d[:400] + "..."))
155
161
156 def changegroup(self, nodes, kind):
162 def changegroup(self, nodes, kind):
157 n = " ".join(map(hex, nodes))
163 n = " ".join(map(hex, nodes))
158 return self.do_cmd("changegroup", roots=n)
164 return self.do_cmd("changegroup", roots=n)
159
165
160 def unbundle(self, cg, heads, source):
166 def unbundle(self, cg, heads, source):
161 d = self.call("unbundle", heads=' '.join(map(hex, heads)))
167 d = self.call("unbundle", heads=' '.join(map(hex, heads)))
162 if d:
168 if d:
163 raise hg.RepoError(_("push refused: %s") % d)
169 raise hg.RepoError(_("push refused: %s") % d)
164
170
165 while 1:
171 while 1:
166 d = cg.read(4096)
172 d = cg.read(4096)
167 if not d: break
173 if not d: break
168 self.pipeo.write(str(len(d)) + '\n')
174 self.pipeo.write(str(len(d)) + '\n')
169 self.pipeo.write(d)
175 self.pipeo.write(d)
170 self.readerr()
176 self.readerr()
171
177
172 self.pipeo.write('0\n')
178 self.pipeo.write('0\n')
173 self.pipeo.flush()
179 self.pipeo.flush()
174
180
175 self.readerr()
181 self.readerr()
176 d = self.pipei.readline()
182 d = self.pipei.readline()
177 if d != '\n':
183 if d != '\n':
178 return 1
184 return 1
179
185
180 l = int(self.pipei.readline())
186 l = int(self.pipei.readline())
181 r = self.pipei.read(l)
187 r = self.pipei.read(l)
182 if not r:
188 if not r:
183 return 1
189 return 1
184 return int(r)
190 return int(r)
185
191
186 def addchangegroup(self, cg, source, url):
192 def addchangegroup(self, cg, source, url):
187 d = self.call("addchangegroup")
193 d = self.call("addchangegroup")
188 if d:
194 if d:
189 raise hg.RepoError(_("push refused: %s") % d)
195 raise hg.RepoError(_("push refused: %s") % d)
190 while 1:
196 while 1:
191 d = cg.read(4096)
197 d = cg.read(4096)
192 if not d: break
198 if not d: break
193 self.pipeo.write(d)
199 self.pipeo.write(d)
194 self.readerr()
200 self.readerr()
195
201
196 self.pipeo.flush()
202 self.pipeo.flush()
197
203
198 self.readerr()
204 self.readerr()
199 l = int(self.pipei.readline())
205 l = int(self.pipei.readline())
200 r = self.pipei.read(l)
206 r = self.pipei.read(l)
201 if not r:
207 if not r:
202 return 1
208 return 1
203 return int(r)
209 return int(r)
204
210
205 def stream_out(self):
211 def stream_out(self):
206 return self.do_cmd('stream_out')
212 return self.do_cmd('stream_out')
207
213
208 instance = sshrepository
214 instance = sshrepository
@@ -1,323 +1,323 b''
1 # ui.py - user interface bits for mercurial
1 # ui.py - user interface bits for mercurial
2 #
2 #
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7
7
8 from i18n import gettext as _
8 from i18n import gettext as _
9 from demandload import *
9 from demandload import *
10 demandload(globals(), "errno getpass os re socket sys tempfile")
10 demandload(globals(), "errno getpass os re socket sys tempfile")
11 demandload(globals(), "ConfigParser mdiff templater traceback util")
11 demandload(globals(), "ConfigParser mdiff templater traceback util")
12
12
13 class ui(object):
13 class ui(object):
14 def __init__(self, verbose=False, debug=False, quiet=False,
14 def __init__(self, verbose=False, debug=False, quiet=False,
15 interactive=True, traceback=False, parentui=None):
15 interactive=True, traceback=False, parentui=None):
16 self.overlay = {}
16 self.overlay = {}
17 if parentui is None:
17 if parentui is None:
18 # this is the parent of all ui children
18 # this is the parent of all ui children
19 self.parentui = None
19 self.parentui = None
20 self.readhooks = []
20 self.readhooks = []
21 self.trusted_users = {}
21 self.trusted_users = {}
22 self.trusted_groups = {}
22 self.trusted_groups = {}
23 self.cdata = ConfigParser.SafeConfigParser()
23 self.cdata = ConfigParser.SafeConfigParser()
24 self.readconfig(util.rcpath())
24 self.readconfig(util.rcpath())
25
25
26 self.quiet = self.configbool("ui", "quiet")
26 self.quiet = self.configbool("ui", "quiet")
27 self.verbose = self.configbool("ui", "verbose")
27 self.verbose = self.configbool("ui", "verbose")
28 self.debugflag = self.configbool("ui", "debug")
28 self.debugflag = self.configbool("ui", "debug")
29 self.interactive = self.configbool("ui", "interactive", True)
29 self.interactive = self.configbool("ui", "interactive", True)
30 self.traceback = traceback
30 self.traceback = traceback
31
31
32 self.updateopts(verbose, debug, quiet, interactive)
32 self.updateopts(verbose, debug, quiet, interactive)
33 self.diffcache = None
33 self.diffcache = None
34 self.header = []
34 self.header = []
35 self.prev_header = []
35 self.prev_header = []
36 self.revlogopts = self.configrevlog()
36 self.revlogopts = self.configrevlog()
37 else:
37 else:
38 # parentui may point to an ui object which is already a child
38 # parentui may point to an ui object which is already a child
39 self.parentui = parentui.parentui or parentui
39 self.parentui = parentui.parentui or parentui
40 self.readhooks = parentui.readhooks[:]
40 self.readhooks = parentui.readhooks[:]
41 self.trusted_users = parentui.trusted_users.copy()
41 self.trusted_users = parentui.trusted_users.copy()
42 self.trusted_groups = parentui.trusted_groups.copy()
42 self.trusted_groups = parentui.trusted_groups.copy()
43 parent_cdata = self.parentui.cdata
43 parent_cdata = self.parentui.cdata
44 self.cdata = ConfigParser.SafeConfigParser(parent_cdata.defaults())
44 self.cdata = ConfigParser.SafeConfigParser(parent_cdata.defaults())
45 # make interpolation work
45 # make interpolation work
46 for section in parent_cdata.sections():
46 for section in parent_cdata.sections():
47 self.cdata.add_section(section)
47 self.cdata.add_section(section)
48 for name, value in parent_cdata.items(section, raw=True):
48 for name, value in parent_cdata.items(section, raw=True):
49 self.cdata.set(section, name, value)
49 self.cdata.set(section, name, value)
50
50
51 def __getattr__(self, key):
51 def __getattr__(self, key):
52 return getattr(self.parentui, key)
52 return getattr(self.parentui, key)
53
53
54 def updateopts(self, verbose=False, debug=False, quiet=False,
54 def updateopts(self, verbose=False, debug=False, quiet=False,
55 interactive=True, traceback=False, config=[]):
55 interactive=True, traceback=False, config=[]):
56 self.quiet = (self.quiet or quiet) and not verbose and not debug
56 self.quiet = (self.quiet or quiet) and not verbose and not debug
57 self.verbose = (self.verbose or verbose) or debug
57 self.verbose = ((self.verbose or verbose) or debug) and not self.quiet
58 self.debugflag = (self.debugflag or debug)
58 self.debugflag = (self.debugflag or debug)
59 self.interactive = (self.interactive and interactive)
59 self.interactive = (self.interactive and interactive)
60 self.traceback = self.traceback or traceback
60 self.traceback = self.traceback or traceback
61 for cfg in config:
61 for cfg in config:
62 try:
62 try:
63 name, value = cfg.split('=', 1)
63 name, value = cfg.split('=', 1)
64 section, name = name.split('.', 1)
64 section, name = name.split('.', 1)
65 if not self.cdata.has_section(section):
65 if not self.cdata.has_section(section):
66 self.cdata.add_section(section)
66 self.cdata.add_section(section)
67 if not section or not name:
67 if not section or not name:
68 raise IndexError
68 raise IndexError
69 self.cdata.set(section, name, value)
69 self.cdata.set(section, name, value)
70 except (IndexError, ValueError):
70 except (IndexError, ValueError):
71 raise util.Abort(_('malformed --config option: %s') % cfg)
71 raise util.Abort(_('malformed --config option: %s') % cfg)
72
72
73 def readconfig(self, fn, root=None):
73 def readconfig(self, fn, root=None):
74 if isinstance(fn, basestring):
74 if isinstance(fn, basestring):
75 fn = [fn]
75 fn = [fn]
76 for f in fn:
76 for f in fn:
77 try:
77 try:
78 fp = open(f)
78 fp = open(f)
79 except IOError:
79 except IOError:
80 continue
80 continue
81 if ((self.trusted_users or self.trusted_groups) and
81 if ((self.trusted_users or self.trusted_groups) and
82 '*' not in self.trusted_users and
82 '*' not in self.trusted_users and
83 '*' not in self.trusted_groups):
83 '*' not in self.trusted_groups):
84 st = util.fstat(fp)
84 st = util.fstat(fp)
85 user = util.username(st.st_uid)
85 user = util.username(st.st_uid)
86 group = util.groupname(st.st_gid)
86 group = util.groupname(st.st_gid)
87 if (user not in self.trusted_users and
87 if (user not in self.trusted_users and
88 group not in self.trusted_groups):
88 group not in self.trusted_groups):
89 self.warn(_('not reading file %s from untrusted '
89 self.warn(_('not reading file %s from untrusted '
90 'user %s, group %s\n') % (f, user, group))
90 'user %s, group %s\n') % (f, user, group))
91 continue
91 continue
92 try:
92 try:
93 self.cdata.readfp(fp, f)
93 self.cdata.readfp(fp, f)
94 except ConfigParser.ParsingError, inst:
94 except ConfigParser.ParsingError, inst:
95 raise util.Abort(_("Failed to parse %s\n%s") % (f, inst))
95 raise util.Abort(_("Failed to parse %s\n%s") % (f, inst))
96 # translate paths relative to root (or home) into absolute paths
96 # translate paths relative to root (or home) into absolute paths
97 if root is None:
97 if root is None:
98 root = os.path.expanduser('~')
98 root = os.path.expanduser('~')
99 for name, path in self.configitems("paths"):
99 for name, path in self.configitems("paths"):
100 if path and "://" not in path and not os.path.isabs(path):
100 if path and "://" not in path and not os.path.isabs(path):
101 self.cdata.set("paths", name, os.path.join(root, path))
101 self.cdata.set("paths", name, os.path.join(root, path))
102 user = util.username()
102 user = util.username()
103 if user is not None:
103 if user is not None:
104 self.trusted_users[user] = 1
104 self.trusted_users[user] = 1
105 for user in self.configlist('trusted', 'users'):
105 for user in self.configlist('trusted', 'users'):
106 self.trusted_users[user] = 1
106 self.trusted_users[user] = 1
107 for group in self.configlist('trusted', 'groups'):
107 for group in self.configlist('trusted', 'groups'):
108 self.trusted_groups[group] = 1
108 self.trusted_groups[group] = 1
109 for hook in self.readhooks:
109 for hook in self.readhooks:
110 hook(self)
110 hook(self)
111
111
112 def addreadhook(self, hook):
112 def addreadhook(self, hook):
113 self.readhooks.append(hook)
113 self.readhooks.append(hook)
114
114
115 def setconfig(self, section, name, val):
115 def setconfig(self, section, name, val):
116 self.overlay[(section, name)] = val
116 self.overlay[(section, name)] = val
117
117
118 def config(self, section, name, default=None):
118 def config(self, section, name, default=None):
119 if self.overlay.has_key((section, name)):
119 if self.overlay.has_key((section, name)):
120 return self.overlay[(section, name)]
120 return self.overlay[(section, name)]
121 if self.cdata.has_option(section, name):
121 if self.cdata.has_option(section, name):
122 try:
122 try:
123 return self.cdata.get(section, name)
123 return self.cdata.get(section, name)
124 except ConfigParser.InterpolationError, inst:
124 except ConfigParser.InterpolationError, inst:
125 raise util.Abort(_("Error in configuration:\n%s") % inst)
125 raise util.Abort(_("Error in configuration:\n%s") % inst)
126 if self.parentui is None:
126 if self.parentui is None:
127 return default
127 return default
128 else:
128 else:
129 return self.parentui.config(section, name, default)
129 return self.parentui.config(section, name, default)
130
130
131 def configlist(self, section, name, default=None):
131 def configlist(self, section, name, default=None):
132 """Return a list of comma/space separated strings"""
132 """Return a list of comma/space separated strings"""
133 result = self.config(section, name)
133 result = self.config(section, name)
134 if result is None:
134 if result is None:
135 result = default or []
135 result = default or []
136 if isinstance(result, basestring):
136 if isinstance(result, basestring):
137 result = result.replace(",", " ").split()
137 result = result.replace(",", " ").split()
138 return result
138 return result
139
139
140 def configbool(self, section, name, default=False):
140 def configbool(self, section, name, default=False):
141 if self.overlay.has_key((section, name)):
141 if self.overlay.has_key((section, name)):
142 return self.overlay[(section, name)]
142 return self.overlay[(section, name)]
143 if self.cdata.has_option(section, name):
143 if self.cdata.has_option(section, name):
144 try:
144 try:
145 return self.cdata.getboolean(section, name)
145 return self.cdata.getboolean(section, name)
146 except ConfigParser.InterpolationError, inst:
146 except ConfigParser.InterpolationError, inst:
147 raise util.Abort(_("Error in configuration:\n%s") % inst)
147 raise util.Abort(_("Error in configuration:\n%s") % inst)
148 if self.parentui is None:
148 if self.parentui is None:
149 return default
149 return default
150 else:
150 else:
151 return self.parentui.configbool(section, name, default)
151 return self.parentui.configbool(section, name, default)
152
152
153 def has_config(self, section):
153 def has_config(self, section):
154 '''tell whether section exists in config.'''
154 '''tell whether section exists in config.'''
155 return self.cdata.has_section(section)
155 return self.cdata.has_section(section)
156
156
157 def configitems(self, section):
157 def configitems(self, section):
158 items = {}
158 items = {}
159 if self.parentui is not None:
159 if self.parentui is not None:
160 items = dict(self.parentui.configitems(section))
160 items = dict(self.parentui.configitems(section))
161 if self.cdata.has_section(section):
161 if self.cdata.has_section(section):
162 try:
162 try:
163 items.update(dict(self.cdata.items(section)))
163 items.update(dict(self.cdata.items(section)))
164 except ConfigParser.InterpolationError, inst:
164 except ConfigParser.InterpolationError, inst:
165 raise util.Abort(_("Error in configuration:\n%s") % inst)
165 raise util.Abort(_("Error in configuration:\n%s") % inst)
166 x = items.items()
166 x = items.items()
167 x.sort()
167 x.sort()
168 return x
168 return x
169
169
170 def walkconfig(self, seen=None):
170 def walkconfig(self, seen=None):
171 if seen is None:
171 if seen is None:
172 seen = {}
172 seen = {}
173 for (section, name), value in self.overlay.iteritems():
173 for (section, name), value in self.overlay.iteritems():
174 yield section, name, value
174 yield section, name, value
175 seen[section, name] = 1
175 seen[section, name] = 1
176 for section in self.cdata.sections():
176 for section in self.cdata.sections():
177 for name, value in self.cdata.items(section):
177 for name, value in self.cdata.items(section):
178 if (section, name) in seen: continue
178 if (section, name) in seen: continue
179 yield section, name, value.replace('\n', '\\n')
179 yield section, name, value.replace('\n', '\\n')
180 seen[section, name] = 1
180 seen[section, name] = 1
181 if self.parentui is not None:
181 if self.parentui is not None:
182 for parent in self.parentui.walkconfig(seen):
182 for parent in self.parentui.walkconfig(seen):
183 yield parent
183 yield parent
184
184
185 def extensions(self):
185 def extensions(self):
186 result = self.configitems("extensions")
186 result = self.configitems("extensions")
187 for i, (key, value) in enumerate(result):
187 for i, (key, value) in enumerate(result):
188 if value:
188 if value:
189 result[i] = (key, os.path.expanduser(value))
189 result[i] = (key, os.path.expanduser(value))
190 return result
190 return result
191
191
192 def hgignorefiles(self):
192 def hgignorefiles(self):
193 result = []
193 result = []
194 for key, value in self.configitems("ui"):
194 for key, value in self.configitems("ui"):
195 if key == 'ignore' or key.startswith('ignore.'):
195 if key == 'ignore' or key.startswith('ignore.'):
196 result.append(os.path.expanduser(value))
196 result.append(os.path.expanduser(value))
197 return result
197 return result
198
198
199 def configrevlog(self):
199 def configrevlog(self):
200 result = {}
200 result = {}
201 for key, value in self.configitems("revlog"):
201 for key, value in self.configitems("revlog"):
202 result[key.lower()] = value
202 result[key.lower()] = value
203 return result
203 return result
204
204
205 def username(self):
205 def username(self):
206 """Return default username to be used in commits.
206 """Return default username to be used in commits.
207
207
208 Searched in this order: $HGUSER, [ui] section of hgrcs, $EMAIL
208 Searched in this order: $HGUSER, [ui] section of hgrcs, $EMAIL
209 and stop searching if one of these is set.
209 and stop searching if one of these is set.
210 Abort if found username is an empty string to force specifying
210 Abort if found username is an empty string to force specifying
211 the commit user elsewhere, e.g. with line option or repo hgrc.
211 the commit user elsewhere, e.g. with line option or repo hgrc.
212 If not found, use ($LOGNAME or $USER or $LNAME or
212 If not found, use ($LOGNAME or $USER or $LNAME or
213 $USERNAME) +"@full.hostname".
213 $USERNAME) +"@full.hostname".
214 """
214 """
215 user = os.environ.get("HGUSER")
215 user = os.environ.get("HGUSER")
216 if user is None:
216 if user is None:
217 user = self.config("ui", "username")
217 user = self.config("ui", "username")
218 if user is None:
218 if user is None:
219 user = os.environ.get("EMAIL")
219 user = os.environ.get("EMAIL")
220 if user is None:
220 if user is None:
221 try:
221 try:
222 user = '%s@%s' % (util.getuser(), socket.getfqdn())
222 user = '%s@%s' % (util.getuser(), socket.getfqdn())
223 except KeyError:
223 except KeyError:
224 raise util.Abort(_("Please specify a username."))
224 raise util.Abort(_("Please specify a username."))
225 return user
225 return user
226
226
227 def shortuser(self, user):
227 def shortuser(self, user):
228 """Return a short representation of a user name or email address."""
228 """Return a short representation of a user name or email address."""
229 if not self.verbose: user = util.shortuser(user)
229 if not self.verbose: user = util.shortuser(user)
230 return user
230 return user
231
231
232 def expandpath(self, loc, default=None):
232 def expandpath(self, loc, default=None):
233 """Return repository location relative to cwd or from [paths]"""
233 """Return repository location relative to cwd or from [paths]"""
234 if "://" in loc or os.path.isdir(loc):
234 if "://" in loc or os.path.isdir(loc):
235 return loc
235 return loc
236
236
237 path = self.config("paths", loc)
237 path = self.config("paths", loc)
238 if not path and default is not None:
238 if not path and default is not None:
239 path = self.config("paths", default)
239 path = self.config("paths", default)
240 return path or loc
240 return path or loc
241
241
242 def write(self, *args):
242 def write(self, *args):
243 if self.header:
243 if self.header:
244 if self.header != self.prev_header:
244 if self.header != self.prev_header:
245 self.prev_header = self.header
245 self.prev_header = self.header
246 self.write(*self.header)
246 self.write(*self.header)
247 self.header = []
247 self.header = []
248 for a in args:
248 for a in args:
249 sys.stdout.write(str(a))
249 sys.stdout.write(str(a))
250
250
251 def write_header(self, *args):
251 def write_header(self, *args):
252 for a in args:
252 for a in args:
253 self.header.append(str(a))
253 self.header.append(str(a))
254
254
255 def write_err(self, *args):
255 def write_err(self, *args):
256 try:
256 try:
257 if not sys.stdout.closed: sys.stdout.flush()
257 if not sys.stdout.closed: sys.stdout.flush()
258 for a in args:
258 for a in args:
259 sys.stderr.write(str(a))
259 sys.stderr.write(str(a))
260 except IOError, inst:
260 except IOError, inst:
261 if inst.errno != errno.EPIPE:
261 if inst.errno != errno.EPIPE:
262 raise
262 raise
263
263
264 def flush(self):
264 def flush(self):
265 try: sys.stdout.flush()
265 try: sys.stdout.flush()
266 except: pass
266 except: pass
267 try: sys.stderr.flush()
267 try: sys.stderr.flush()
268 except: pass
268 except: pass
269
269
270 def readline(self):
270 def readline(self):
271 return sys.stdin.readline()[:-1]
271 return sys.stdin.readline()[:-1]
272 def prompt(self, msg, pat=None, default="y"):
272 def prompt(self, msg, pat=None, default="y"):
273 if not self.interactive: return default
273 if not self.interactive: return default
274 while 1:
274 while 1:
275 self.write(msg, " ")
275 self.write(msg, " ")
276 r = self.readline()
276 r = self.readline()
277 if not pat or re.match(pat, r):
277 if not pat or re.match(pat, r):
278 return r
278 return r
279 else:
279 else:
280 self.write(_("unrecognized response\n"))
280 self.write(_("unrecognized response\n"))
281 def getpass(self, prompt=None, default=None):
281 def getpass(self, prompt=None, default=None):
282 if not self.interactive: return default
282 if not self.interactive: return default
283 return getpass.getpass(prompt or _('password: '))
283 return getpass.getpass(prompt or _('password: '))
284 def status(self, *msg):
284 def status(self, *msg):
285 if not self.quiet: self.write(*msg)
285 if not self.quiet: self.write(*msg)
286 def warn(self, *msg):
286 def warn(self, *msg):
287 self.write_err(*msg)
287 self.write_err(*msg)
288 def note(self, *msg):
288 def note(self, *msg):
289 if self.verbose: self.write(*msg)
289 if self.verbose: self.write(*msg)
290 def debug(self, *msg):
290 def debug(self, *msg):
291 if self.debugflag: self.write(*msg)
291 if self.debugflag: self.write(*msg)
292 def edit(self, text, user):
292 def edit(self, text, user):
293 (fd, name) = tempfile.mkstemp(prefix="hg-editor-", suffix=".txt",
293 (fd, name) = tempfile.mkstemp(prefix="hg-editor-", suffix=".txt",
294 text=True)
294 text=True)
295 try:
295 try:
296 f = os.fdopen(fd, "w")
296 f = os.fdopen(fd, "w")
297 f.write(text)
297 f.write(text)
298 f.close()
298 f.close()
299
299
300 editor = (os.environ.get("HGEDITOR") or
300 editor = (os.environ.get("HGEDITOR") or
301 self.config("ui", "editor") or
301 self.config("ui", "editor") or
302 os.environ.get("EDITOR", "vi"))
302 os.environ.get("EDITOR", "vi"))
303
303
304 util.system("%s \"%s\"" % (editor, name),
304 util.system("%s \"%s\"" % (editor, name),
305 environ={'HGUSER': user},
305 environ={'HGUSER': user},
306 onerr=util.Abort, errprefix=_("edit failed"))
306 onerr=util.Abort, errprefix=_("edit failed"))
307
307
308 f = open(name)
308 f = open(name)
309 t = f.read()
309 t = f.read()
310 f.close()
310 f.close()
311 t = re.sub("(?m)^HG:.*\n", "", t)
311 t = re.sub("(?m)^HG:.*\n", "", t)
312 finally:
312 finally:
313 os.unlink(name)
313 os.unlink(name)
314
314
315 return t
315 return t
316
316
317 def print_exc(self):
317 def print_exc(self):
318 '''print exception traceback if traceback printing enabled.
318 '''print exception traceback if traceback printing enabled.
319 only to call in exception handler. returns true if traceback
319 only to call in exception handler. returns true if traceback
320 printed.'''
320 printed.'''
321 if self.traceback:
321 if self.traceback:
322 traceback.print_exc()
322 traceback.print_exc()
323 return self.traceback
323 return self.traceback
@@ -1,50 +1,50 b''
1 default = 'summary'
1 default = 'summary'
2 header = header-gitweb.tmpl
2 header = header-gitweb.tmpl
3 footer = footer-gitweb.tmpl
3 footer = footer-gitweb.tmpl
4 search = search-gitweb.tmpl
4 search = search-gitweb.tmpl
5 changelog = changelog-gitweb.tmpl
5 changelog = changelog-gitweb.tmpl
6 summary = summary-gitweb.tmpl
6 summary = summary-gitweb.tmpl
7 error = error-gitweb.tmpl
7 error = error-gitweb.tmpl
8 naventry = '<a href="?cmd=changelog;rev=#rev#;style=gitweb">#label|escape#</a> '
8 naventry = '<a href="?cmd=changelog;rev=#rev#;style=gitweb">#label|escape#</a> '
9 navshortentry = '<a href="?cmd=shortlog;rev=#rev#;style=gitweb">#label|escape#</a> '
9 navshortentry = '<a href="?cmd=shortlog;rev=#rev#;style=gitweb">#label|escape#</a> '
10 filedifflink = '<a href="?cmd=filediff;node=#node#;file=#file|urlescape#;style=gitweb">#file|escape#</a> '
10 filedifflink = '<a href="?cmd=filediff;node=#node#;file=#file|urlescape#;style=gitweb">#file|escape#</a> '
11 filenodelink = '<tr class="light"><td><a class="list" href="">#file|escape#</a></td><td></td><td class="link"><a href="?cmd=file;filenode=#filenode#;file=#file|urlescape#;style=gitweb">file</a> | <!-- FIXME: <a href="?fd=#filenode|short#;file=#file|urlescape#;style=gitweb">diff</a> | --> <a href="?cmd=filelog;filenode=#filenode|short#;file=#file|urlescape#;style=gitweb">revisions</a></td></tr>'
11 filenodelink = '<tr class="light"><td><a class="list" href="">#file|escape#</a></td><td></td><td class="link"><a href="?cmd=file;filenode=#filenode#;file=#file|urlescape#;style=gitweb">file</a> | <a href="?fa=#filenode|short#;file=#file|urlescape#;style=gitweb">annotate</a> | <!-- FIXME: <a href="?fd=#filenode|short#;file=#file|urlescape#;style=gitweb">diff</a> | --> <a href="?cmd=filelog;filenode=#filenode|short#;file=#file|urlescape#;style=gitweb">revisions</a></td></tr>'
12 fileellipses = '...'
12 fileellipses = '...'
13 changelogentry = changelogentry-gitweb.tmpl
13 changelogentry = changelogentry-gitweb.tmpl
14 searchentry = changelogentry-gitweb.tmpl
14 searchentry = changelogentry-gitweb.tmpl
15 changeset = changeset-gitweb.tmpl
15 changeset = changeset-gitweb.tmpl
16 manifest = manifest-gitweb.tmpl
16 manifest = manifest-gitweb.tmpl
17 manifestdirentry = '<tr class="parity#parity#"><td style="font-family:monospace">drwxr-xr-x</td><td><a href="?mf=#manifest|short#;path=#path|urlescape#;style=gitweb">#basename|escape#/</a></td><td class="link"><a href="?mf=#manifest|short#;path=#path|urlescape#;style=gitweb">manifest</a></td></tr>'
17 manifestdirentry = '<tr class="parity#parity#"><td style="font-family:monospace">drwxr-xr-x</td><td><a href="?mf=#manifest|short#;path=#path|urlescape#;style=gitweb">#basename|escape#/</a></td><td class="link"><a href="?mf=#manifest|short#;path=#path|urlescape#;style=gitweb">manifest</a></td></tr>'
18 manifestfileentry = '<tr class="parity#parity#"><td style="font-family:monospace">#permissions|permissions#</td><td class="list"><a class="list" href="?f=#filenode|short#;file=#file|urlescape#;style=gitweb">#basename|escape#</a></td><td class="link"><a href="?f=#filenode|short#;file=#file|urlescape#;style=gitweb">file</a> | <a href="?fl=#filenode|short#;file=#file|urlescape#;style=gitweb">revisions</a> | <a href="?fa=#filenode|short#;file=#file|urlescape#;style=gitweb">annotate</a></td></tr>'
18 manifestfileentry = '<tr class="parity#parity#"><td style="font-family:monospace">#permissions|permissions#</td><td class="list"><a class="list" href="?f=#filenode|short#;file=#file|urlescape#;style=gitweb">#basename|escape#</a></td><td class="link"><a href="?f=#filenode|short#;file=#file|urlescape#;style=gitweb">file</a> | <a href="?fl=#filenode|short#;file=#file|urlescape#;style=gitweb">revisions</a> | <a href="?fa=#filenode|short#;file=#file|urlescape#;style=gitweb">annotate</a></td></tr>'
19 filerevision = filerevision-gitweb.tmpl
19 filerevision = filerevision-gitweb.tmpl
20 fileannotate = fileannotate-gitweb.tmpl
20 fileannotate = fileannotate-gitweb.tmpl
21 filelog = filelog-gitweb.tmpl
21 filelog = filelog-gitweb.tmpl
22 fileline = '<div style="font-family:monospace" class="parity#parity#"><pre><span class="linenr"> #linenumber#</span> #line|escape#</pre></div>'
22 fileline = '<div style="font-family:monospace" class="parity#parity#"><pre><span class="linenr"> #linenumber#</span> #line|escape#</pre></div>'
23 annotateline = '<tr style="font-family:monospace" class="parity#parity#"><td class="linenr" style="text-align: right;"><a href="?cs=#node|short#;style=gitweb">#author|obfuscate#@#rev#</a></td><td><pre>#line|escape#</pre></td></tr>'
23 annotateline = '<tr style="font-family:monospace" class="parity#parity#"><td class="linenr" style="text-align: right;"><a href="?cs=#node|short#;style=gitweb">#author|obfuscate#@#rev#</a></td><td><pre>#line|escape#</pre></td></tr>'
24 difflineplus = '<div style="color:#008800;">#line|escape#</div>'
24 difflineplus = '<div style="color:#008800;">#line|escape#</div>'
25 difflineminus = '<div style="color:#cc0000;">#line|escape#</div>'
25 difflineminus = '<div style="color:#cc0000;">#line|escape#</div>'
26 difflineat = '<div style="color:#990099;">#line|escape#</div>'
26 difflineat = '<div style="color:#990099;">#line|escape#</div>'
27 diffline = '<div>#line|escape#</div>'
27 diffline = '<div>#line|escape#</div>'
28 changelogparent = '<tr><th class="parent">parent #rev#:</th><td class="parent"><a href="?cmd=changeset;node=#node#;style=gitweb">#node|short#</a></td></tr>'
28 changelogparent = '<tr><th class="parent">parent #rev#:</th><td class="parent"><a href="?cmd=changeset;node=#node#;style=gitweb">#node|short#</a></td></tr>'
29 changesetparent = '<tr><td>parent</td><td style="font-family:monospace"><a class="list" href="?cmd=changeset;node=#node|short#;style=gitweb">#node|short#</a></td></tr>'
29 changesetparent = '<tr><td>parent</td><td style="font-family:monospace"><a class="list" href="?cmd=changeset;node=#node|short#;style=gitweb">#node|short#</a></td></tr>'
30 filerevparent = '<tr><td class="metatag">parent:</td><td><a href="?cmd=file;file=#file|urlescape#;filenode=#node#;style=gitweb">#node|short#</a></td></tr>'
30 filerevparent = '<tr><td class="metatag">parent:</td><td><a href="?cmd=file;file=#file|urlescape#;filenode=#node#;style=gitweb">#node|short#</a></td></tr>'
31 filerename = '<tr><td class="metatag">parent:</td><td><a href="?f=#node|short#;file=#file|urlescape#;style=gitweb">#file|escape#@#node|short#</a></td></tr>'
31 filerename = '<tr><td class="metatag">parent:</td><td><a href="?f=#node|short#;file=#file|urlescape#;style=gitweb">#file|escape#@#node|short#</a></td></tr>'
32 filelogrename = '| <a href="?f=#node|short#;file=#file|urlescape#;style=gitweb">base</a>'
32 filelogrename = '| <a href="?f=#node|short#;file=#file|urlescape#;style=gitweb">base</a>'
33 fileannotateparent = '<tr><td class="metatag">parent:</td><td><a href="?cmd=annotate;file=#file|urlescape#;filenode=#node#;style=gitweb">#node|short#</a></td></tr>'
33 fileannotateparent = '<tr><td class="metatag">parent:</td><td><a href="?cmd=annotate;file=#file|urlescape#;filenode=#node#;style=gitweb">#node|short#</a></td></tr>'
34 changelogchild = '<tr><th class="child">child #rev#:</th><td class="child"><a href="?cmd=changeset;node=#node#;style=gitweb">#node|short#</a></td></tr>'
34 changelogchild = '<tr><th class="child">child #rev#:</th><td class="child"><a href="?cmd=changeset;node=#node#;style=gitweb">#node|short#</a></td></tr>'
35 changesetchild = '<tr><td>child</td><td style="font-family:monospace"><a class="list" href="?cmd=changeset;node=#node|short#;style=gitweb">#node|short#</a></td></tr>'
35 changesetchild = '<tr><td>child</td><td style="font-family:monospace"><a class="list" href="?cmd=changeset;node=#node|short#;style=gitweb">#node|short#</a></td></tr>'
36 filerevchild = '<tr><td class="metatag">child:</td><td><a href="?cmd=file;file=#file|urlescape#;filenode=#node#;style=gitweb">#node|short#</a></td></tr>'
36 filerevchild = '<tr><td class="metatag">child:</td><td><a href="?cmd=file;file=#file|urlescape#;filenode=#node#;style=gitweb">#node|short#</a></td></tr>'
37 fileannotatechild = '<tr><td class="metatag">child:</td><td><a href="?cmd=annotate;file=#file|urlescape#;filenode=#node#;style=gitweb">#node|short#</a></td></tr>'
37 fileannotatechild = '<tr><td class="metatag">child:</td><td><a href="?cmd=annotate;file=#file|urlescape#;filenode=#node#;style=gitweb">#node|short#</a></td></tr>'
38 tags = tags-gitweb.tmpl
38 tags = tags-gitweb.tmpl
39 tagentry = '<tr class="parity#parity#"><td class="age"><i>#date|age# ago</i></td><td><a class="list" href="?cmd=changeset;node=#node|short#;style=gitweb"><b>#tag|escape#</b></a></td><td class="link"><a href="?cmd=changeset;node=#node|short#;style=gitweb">changeset</a> | <a href="?cmd=changelog;rev=#node|short#;style=gitweb">changelog</a> | <a href="?mf=#tagmanifest|short#;path=/;style=gitweb">manifest</a></td></tr>'
39 tagentry = '<tr class="parity#parity#"><td class="age"><i>#date|age# ago</i></td><td><a class="list" href="?cmd=changeset;node=#node|short#;style=gitweb"><b>#tag|escape#</b></a></td><td class="link"><a href="?cmd=changeset;node=#node|short#;style=gitweb">changeset</a> | <a href="?cmd=changelog;rev=#node|short#;style=gitweb">changelog</a> | <a href="?mf=#tagmanifest|short#;path=/;style=gitweb">manifest</a></td></tr>'
40 diffblock = '<pre>#lines#</pre>'
40 diffblock = '<pre>#lines#</pre>'
41 changelogtag = '<tr><th class="tag">tag:</th><td class="tag">#tag|escape#</td></tr>'
41 changelogtag = '<tr><th class="tag">tag:</th><td class="tag">#tag|escape#</td></tr>'
42 changesettag = '<tr><td>tag</td><td>#tag|escape#</td></tr>'
42 changesettag = '<tr><td>tag</td><td>#tag|escape#</td></tr>'
43 filediffparent = '<tr><th class="parent">parent #rev#:</th><td class="parent"><a href="?cmd=changeset;node=#node#;style=gitweb">#node|short#</a></td></tr>'
43 filediffparent = '<tr><th class="parent">parent #rev#:</th><td class="parent"><a href="?cmd=changeset;node=#node#;style=gitweb">#node|short#</a></td></tr>'
44 filelogparent = '<tr><td align="right">parent #rev#:&nbsp;</td><td><a href="?cmd=file;file=#file|urlescape#;filenode=#node#;style=gitweb">#node|short#</a></td></tr>'
44 filelogparent = '<tr><td align="right">parent #rev#:&nbsp;</td><td><a href="?cmd=file;file=#file|urlescape#;filenode=#node#;style=gitweb">#node|short#</a></td></tr>'
45 filediffchild = '<tr><th class="child">child #rev#:</th><td class="child"><a href="?cmd=changeset;node=#node#;style=gitweb">#node|short#</a></td></tr>'
45 filediffchild = '<tr><th class="child">child #rev#:</th><td class="child"><a href="?cmd=changeset;node=#node#;style=gitweb">#node|short#</a></td></tr>'
46 filelogchild = '<tr><td align="right">child #rev#:&nbsp;</td><td><a href="?cmd=file;file=#file|urlescape#;filenode=#node#;style=gitweb">#node|short#</a></td></tr>'
46 filelogchild = '<tr><td align="right">child #rev#:&nbsp;</td><td><a href="?cmd=file;file=#file|urlescape#;filenode=#node#;style=gitweb">#node|short#</a></td></tr>'
47 shortlog = shortlog-gitweb.tmpl
47 shortlog = shortlog-gitweb.tmpl
48 shortlogentry = '<tr class="parity#parity#"><td class="age"><i>#date|age# ago</i></td><td><i>#author#</i></td><td><a class="list" href="?cmd=changeset;node=#node|short#;style=gitweb"><b>#desc|strip|firstline|escape#</b></a></td><td class="link"><a href="?cmd=changeset;node=#node|short#;style=gitweb">changeset</a> | <a href="?cmd=manifest;manifest=#manifest|short#;path=/;style=gitweb">manifest</a></td></tr>'
48 shortlogentry = '<tr class="parity#parity#"><td class="age"><i>#date|age# ago</i></td><td><i>#author#</i></td><td><a class="list" href="?cmd=changeset;node=#node|short#;style=gitweb"><b>#desc|strip|firstline|escape#</b></a></td><td class="link"><a href="?cmd=changeset;node=#node|short#;style=gitweb">changeset</a> | <a href="?cmd=manifest;manifest=#manifest|short#;path=/;style=gitweb">manifest</a></td></tr>'
49 filelogentry = '<tr class="parity#parity#"><td class="age"><i>#date|age# ago</i></td><td><a class="list" href="?cmd=changeset;node=#node|short#;style=gitweb"><b>#desc|strip|firstline|escape#</b></a></td><td class="link"><!-- FIXME: <a href="?fd=#node|short#;file=#file|urlescape#;style=gitweb">diff</a> | --> <a href="?fa=#filenode|short#;file=#file|urlescape#;style=gitweb">annotate</a> #rename%filelogrename#</td></tr>'
49 filelogentry = '<tr class="parity#parity#"><td class="age"><i>#date|age# ago</i></td><td><a class="list" href="?cmd=changeset;node=#node|short#;style=gitweb"><b>#desc|strip|firstline|escape#</b></a></td><td class="link"><a href="?f=#node|short#;file=#file|urlescape#;style=gitweb">file</a> | <!-- FIXME: <a href="?fd=#node|short#;file=#file|urlescape#;style=gitweb">diff</a> | --> <a href="?fa=#filenode|short#;file=#file|urlescape#;style=gitweb">annotate</a> #rename%filelogrename#</td></tr>'
50 archiveentry = ' | <a href="?ca=#node|short#;type=#type|urlescape#">#type|escape#</a> '
50 archiveentry = ' | <a href="?ca=#node|short#;type=#type|urlescape#">#type|escape#</a> '
@@ -1,52 +1,61 b''
1 #!/bin/sh
1 #!/bin/sh
2
2
3 # This test tries to exercise the ssh functionality with a dummy script
3 # This test tries to exercise the ssh functionality with a dummy script
4
4
5 cat <<'EOF' > dummyssh
5 cat <<'EOF' > dummyssh
6 #!/bin/sh
6 #!/bin/sh
7 # this attempts to deal with relative pathnames
7 # this attempts to deal with relative pathnames
8 cd `dirname $0`
8 cd `dirname $0`
9
9
10 # check for proper args
10 # check for proper args
11 if [ $1 != "user@dummy" ] ; then
11 if [ $1 != "user@dummy" ] ; then
12 exit -1
12 exit -1
13 fi
13 fi
14
14
15 # check that we're in the right directory
15 # check that we're in the right directory
16 if [ ! -x dummyssh ] ; then
16 if [ ! -x dummyssh ] ; then
17 exit -1
17 exit -1
18 fi
18 fi
19
19
20 echo Got arguments 1:$1 2:$2 3:$3 4:$4 5:$5 >> dummylog
20 echo Got arguments 1:$1 2:$2 3:$3 4:$4 5:$5 >> dummylog
21 $2
21 $2
22 EOF
22 EOF
23 chmod +x dummyssh
23 chmod +x dummyssh
24
24
25 echo "# creating 'local'"
25 echo "# creating 'local'"
26 hg init local
26 hg init local
27 echo this > local/foo
27 echo this > local/foo
28 hg ci --cwd local -A -m "init" -d "1000000 0"
28 hg ci --cwd local -A -m "init" -d "1000000 0"
29
29
30 echo "#test failure"
31 hg init local
32
30 echo "# init+push to remote2"
33 echo "# init+push to remote2"
31 hg init -e ./dummyssh ssh://user@dummy/remote2
34 hg init -e ./dummyssh ssh://user@dummy/remote2
32 hg incoming -R remote2 local
35 hg incoming -R remote2 local
33 hg push -R local -e ./dummyssh ssh://user@dummy/remote2
36 hg push -R local -e ./dummyssh ssh://user@dummy/remote2
34
37
35 echo "# clone to remote1"
38 echo "# clone to remote1"
36 hg clone -e ./dummyssh local ssh://user@dummy/remote1
39 hg clone -e ./dummyssh local ssh://user@dummy/remote1
37
40
41 echo "# init to existing repo"
42 hg init -e ./dummyssh ssh://user@dummy/remote1
43
44 echo "# clone to existing repo"
45 hg clone -e ./dummyssh local ssh://user@dummy/remote1
46
38 echo "# output of dummyssh"
47 echo "# output of dummyssh"
39 cat dummylog
48 cat dummylog
40
49
41 echo "# comparing repositories"
50 echo "# comparing repositories"
42 hg tip -q -R local
51 hg tip -q -R local
43 hg tip -q -R remote1
52 hg tip -q -R remote1
44 hg tip -q -R remote2
53 hg tip -q -R remote2
45
54
46 echo "# check names for repositories (clashes with URL schemes, special chars)"
55 echo "# check names for repositories (clashes with URL schemes, special chars)"
47 for i in bundle file hg http https old-http ssh static-http " " "with space"; do
56 for i in bundle file hg http https old-http ssh static-http " " "with space"; do
48 echo "# hg init \"$i\""
57 echo "# hg init \"$i\""
49 hg init "$i"
58 hg init "$i"
50 test -d "$i" -a -d "$i/.hg" -a -d "$i/.hg/data" && echo "ok" || echo "failed"
59 test -d "$i" -a -d "$i/.hg" -a -d "$i/.hg/data" && echo "ok" || echo "failed"
51 done
60 done
52
61
@@ -1,56 +1,64 b''
1 # creating 'local'
1 # creating 'local'
2 adding foo
2 adding foo
3 #test failure
4 abort: repository local already exists!
3 # init+push to remote2
5 # init+push to remote2
6 remote: abort: repository remote2 not found!
4 changeset: 0:c4e059d443be
7 changeset: 0:c4e059d443be
5 tag: tip
8 tag: tip
6 user: test
9 user: test
7 date: Mon Jan 12 13:46:40 1970 +0000
10 date: Mon Jan 12 13:46:40 1970 +0000
8 summary: init
11 summary: init
9
12
10 pushing to ssh://user@dummy/remote2
13 pushing to ssh://user@dummy/remote2
11 searching for changes
14 searching for changes
12 remote: adding changesets
15 remote: adding changesets
13 remote: adding manifests
16 remote: adding manifests
14 remote: adding file changes
17 remote: adding file changes
15 remote: added 1 changesets with 1 changes to 1 files
18 remote: added 1 changesets with 1 changes to 1 files
16 # clone to remote1
19 # clone to remote1
20 remote: abort: repository remote1 not found!
17 searching for changes
21 searching for changes
18 remote: abort: repository remote1 not found!
19 remote: adding changesets
22 remote: adding changesets
20 remote: adding manifests
23 remote: adding manifests
21 remote: adding file changes
24 remote: adding file changes
22 remote: added 1 changesets with 1 changes to 1 files
25 remote: added 1 changesets with 1 changes to 1 files
26 # init to existing repo
27 abort: repository ssh://user@dummy/remote1 already exists!
28 # clone to existing repo
29 abort: repository ssh://user@dummy/remote1 already exists!
23 # output of dummyssh
30 # output of dummyssh
24 Got arguments 1:user@dummy 2:hg -R remote2 serve --stdio 3: 4: 5:
31 Got arguments 1:user@dummy 2:hg -R remote2 serve --stdio 3: 4: 5:
25 Got arguments 1:user@dummy 2:hg init remote2 3: 4: 5:
32 Got arguments 1:user@dummy 2:hg init remote2 3: 4: 5:
26 Got arguments 1:user@dummy 2:hg -R remote2 serve --stdio 3: 4: 5:
33 Got arguments 1:user@dummy 2:hg -R remote2 serve --stdio 3: 4: 5:
27 Got arguments 1:user@dummy 2:hg -R remote2 serve --stdio 3: 4: 5:
34 Got arguments 1:user@dummy 2:hg -R remote2 serve --stdio 3: 4: 5:
28 Got arguments 1:user@dummy 2:hg -R remote1 serve --stdio 3: 4: 5:
35 Got arguments 1:user@dummy 2:hg -R remote1 serve --stdio 3: 4: 5:
36 Got arguments 1:user@dummy 2:hg init remote1 3: 4: 5:
29 Got arguments 1:user@dummy 2:hg -R remote1 serve --stdio 3: 4: 5:
37 Got arguments 1:user@dummy 2:hg -R remote1 serve --stdio 3: 4: 5:
30 Got arguments 1:user@dummy 2:hg init remote1 3: 4: 5:
38 Got arguments 1:user@dummy 2:hg -R remote1 serve --stdio 3: 4: 5:
31 Got arguments 1:user@dummy 2:hg -R remote1 serve --stdio 3: 4: 5:
39 Got arguments 1:user@dummy 2:hg -R remote1 serve --stdio 3: 4: 5:
32 # comparing repositories
40 # comparing repositories
33 0:c4e059d443be
41 0:c4e059d443be
34 0:c4e059d443be
42 0:c4e059d443be
35 0:c4e059d443be
43 0:c4e059d443be
36 # check names for repositories (clashes with URL schemes, special chars)
44 # check names for repositories (clashes with URL schemes, special chars)
37 # hg init "bundle"
45 # hg init "bundle"
38 ok
46 ok
39 # hg init "file"
47 # hg init "file"
40 ok
48 ok
41 # hg init "hg"
49 # hg init "hg"
42 ok
50 ok
43 # hg init "http"
51 # hg init "http"
44 ok
52 ok
45 # hg init "https"
53 # hg init "https"
46 ok
54 ok
47 # hg init "old-http"
55 # hg init "old-http"
48 ok
56 ok
49 # hg init "ssh"
57 # hg init "ssh"
50 ok
58 ok
51 # hg init "static-http"
59 # hg init "static-http"
52 ok
60 ok
53 # hg init " "
61 # hg init " "
54 ok
62 ok
55 # hg init "with space"
63 # hg init "with space"
56 ok
64 ok
General Comments 0
You need to be logged in to leave comments. Login now