##// END OF EJS Templates
hooks: made the callback host configurable....
marcink -
r2833:920dbf8a default
parent child Browse files
Show More
@@ -1,716 +1,720 b''
1 1
2 2
3 3 ################################################################################
4 4 ## RHODECODE COMMUNITY EDITION CONFIGURATION ##
5 5 # The %(here)s variable will be replaced with the parent directory of this file#
6 6 ################################################################################
7 7
8 8 [DEFAULT]
9 9 debug = true
10 10
11 11 ################################################################################
12 12 ## EMAIL CONFIGURATION ##
13 13 ## Uncomment and replace with the email address which should receive ##
14 14 ## any error reports after an application crash ##
15 15 ## Additionally these settings will be used by the RhodeCode mailing system ##
16 16 ################################################################################
17 17
18 18 ## prefix all emails subjects with given prefix, helps filtering out emails
19 19 #email_prefix = [RhodeCode]
20 20
21 21 ## email FROM address all mails will be sent
22 22 #app_email_from = rhodecode-noreply@localhost
23 23
24 24 ## Uncomment and replace with the address which should receive any error report
25 25 ## note: using appenlight for error handling doesn't need this to be uncommented
26 26 #email_to = admin@localhost
27 27
28 28 ## in case of Application errors, send an error email
29 29 #error_email_from = rhodecode_error@localhost
30 30
31 31 ## additional error message to be sent in case of server crash
32 32 #error_message =
33 33
34 34
35 35 #smtp_server = mail.server.com
36 36 #smtp_username =
37 37 #smtp_password =
38 38 #smtp_port =
39 39 #smtp_use_tls = false
40 40 #smtp_use_ssl = true
41 41 ## Specify available auth parameters here (e.g. LOGIN PLAIN CRAM-MD5, etc.)
42 42 #smtp_auth =
43 43
44 44 [server:main]
45 45 ## COMMON ##
46 46 host = 127.0.0.1
47 47 port = 5000
48 48
49 49 ##################################
50 50 ## WAITRESS WSGI SERVER ##
51 51 ## Recommended for Development ##
52 52 ##################################
53 53
54 54 use = egg:waitress#main
55 55 ## number of worker threads
56 56 threads = 5
57 57 ## MAX BODY SIZE 100GB
58 58 max_request_body_size = 107374182400
59 59 ## Use poll instead of select, fixes file descriptors limits problems.
60 60 ## May not work on old windows systems.
61 61 asyncore_use_poll = true
62 62
63 63
64 64 ##########################
65 65 ## GUNICORN WSGI SERVER ##
66 66 ##########################
67 67 ## run with gunicorn --log-config rhodecode.ini --paste rhodecode.ini
68 68
69 69 #use = egg:gunicorn#main
70 70 ## Sets the number of process workers. You must set `instance_id = *`
71 71 ## when this option is set to more than one worker, recommended
72 72 ## value is (2 * NUMBER_OF_CPUS + 1), eg 2CPU = 5 workers
73 73 ## The `instance_id = *` must be set in the [app:main] section below
74 74 #workers = 2
75 75 ## number of threads for each of the worker, must be set to 1 for gevent
76 76 ## generally recommended to be at 1
77 77 #threads = 1
78 78 ## process name
79 79 #proc_name = rhodecode
80 80 ## type of worker class, one of sync, gevent
81 81 ## recommended for a bigger setup is using one other than the sync one
82 82 #worker_class = gevent
83 83 ## The maximum number of simultaneous clients. Valid only for Gevent
84 84 #worker_connections = 10
85 85 ## max number of requests that worker will handle before being gracefully
86 86 ## restarted, could prevent memory leaks
87 87 #max_requests = 1000
88 88 #max_requests_jitter = 30
89 89 ## amount of time a worker can spend with handling a request before it
90 90 ## gets killed and restarted. Set to 6hrs
91 91 #timeout = 21600
92 92
93 93
94 94 ## prefix middleware for RhodeCode.
95 95 ## recommended when using proxy setup.
96 96 ## allows to set RhodeCode under a prefix in server.
97 97 ## eg https://server.com/custom_prefix. Enable `filter-with =` option below as well.
98 98 ## And set your prefix like: `prefix = /custom_prefix`
99 99 ## be sure to also set beaker.session.cookie_path = /custom_prefix if you need
100 100 ## to make your cookies only work on prefix url
101 101 [filter:proxy-prefix]
102 102 use = egg:PasteDeploy#prefix
103 103 prefix = /
104 104
105 105 [app:main]
106 106 use = egg:rhodecode-enterprise-ce
107 107
108 108 ## enable proxy prefix middleware, defined above
109 109 #filter-with = proxy-prefix
110 110
111 111 # During development we want to have the debug toolbar enabled
112 112 pyramid.includes =
113 113 pyramid_debugtoolbar
114 114 rhodecode.lib.middleware.request_wrapper
115 115
116 116 pyramid.reload_templates = true
117 117
118 118 debugtoolbar.hosts = 0.0.0.0/0
119 119 debugtoolbar.exclude_prefixes =
120 120 /css
121 121 /fonts
122 122 /images
123 123 /js
124 124
125 125 ## RHODECODE PLUGINS ##
126 126 rhodecode.includes =
127 127 rhodecode.api
128 128
129 129
130 130 # api prefix url
131 131 rhodecode.api.url = /_admin/api
132 132
133 133
134 134 ## END RHODECODE PLUGINS ##
135 135
136 136 ## encryption key used to encrypt social plugin tokens,
137 137 ## remote_urls with credentials etc, if not set it defaults to
138 138 ## `beaker.session.secret`
139 139 #rhodecode.encrypted_values.secret =
140 140
141 141 ## decryption strict mode (enabled by default). It controls if decryption raises
142 142 ## `SignatureVerificationError` in case of wrong key, or damaged encryption data.
143 143 #rhodecode.encrypted_values.strict = false
144 144
145 145 ## return gzipped responses from Rhodecode (static files/application)
146 146 gzip_responses = false
147 147
148 148 ## autogenerate javascript routes file on startup
149 149 generate_js_files = false
150 150
151 151 ## Optional Languages
152 152 ## en(default), be, de, es, fr, it, ja, pl, pt, ru, zh
153 153 lang = en
154 154
155 155 ## perform a full repository scan on each server start, this should be
156 156 ## set to false after first startup, to allow faster server restarts.
157 157 startup.import_repos = false
158 158
159 159 ## Uncomment and set this path to use archive download cache.
160 160 ## Once enabled, generated archives will be cached at this location
161 161 ## and served from the cache during subsequent requests for the same archive of
162 162 ## the repository.
163 163 #archive_cache_dir = /tmp/tarballcache
164 164
165 165 ## URL at which the application is running. This is used for bootstrapping
166 166 ## requests in context when no web request is available. Used in ishell, or
167 167 ## SSH calls. Set this for events to receive proper url for SSH calls.
168 168 app.base_url = http://rhodecode.local
169 169
170 170 ## change this to unique ID for security
171 171 app_instance_uuid = rc-production
172 172
173 173 ## cut off limit for large diffs (size in bytes). If overall diff size on
174 174 ## commit, or pull request exceeds this limit this diff will be displayed
175 175 ## partially. E.g 512000 == 512Kb
176 176 cut_off_limit_diff = 512000
177 177
178 178 ## cut off limit for large files inside diffs (size in bytes). Each individual
179 179 ## file inside diff which exceeds this limit will be displayed partially.
180 180 ## E.g 128000 == 128Kb
181 181 cut_off_limit_file = 128000
182 182
183 183 ## use cache version of scm repo everywhere
184 184 vcs_full_cache = true
185 185
186 186 ## force https in RhodeCode, fixes https redirects, assumes it's always https
187 187 ## Normally this is controlled by proper http flags sent from http server
188 188 force_https = false
189 189
190 190 ## use Strict-Transport-Security headers
191 191 use_htsts = false
192 192
193 193 ## git rev filter option, --all is the default filter, if you need to
194 194 ## hide all refs in changelog switch this to --branches --tags
195 195 git_rev_filter = --branches --tags
196 196
197 197 # Set to true if your repos are exposed using the dumb protocol
198 198 git_update_server_info = false
199 199
200 200 ## RSS/ATOM feed options
201 201 rss_cut_off_limit = 256000
202 202 rss_items_per_page = 10
203 203 rss_include_diff = false
204 204
205 205 ## gist URL alias, used to create nicer urls for gist. This should be an
206 206 ## url that does rewrites to _admin/gists/{gistid}.
207 207 ## example: http://gist.rhodecode.org/{gistid}. Empty means use the internal
208 208 ## RhodeCode url, ie. http[s]://rhodecode.server/_admin/gists/{gistid}
209 209 gist_alias_url =
210 210
211 211 ## List of views (using glob pattern syntax) that AUTH TOKENS could be
212 212 ## used for access.
213 213 ## Adding ?auth_token=TOKEN_HASH to the url authenticates this request as if it
214 214 ## came from the logged-in user who owns this authentication token.
215 215 ## Additionally the @TOKEN syntax can be used to bind the view to a specific
216 216 ## authentication token. Such view would be only accessible when used together
217 217 ## with this authentication token
218 218 ##
219 219 ## list of all views can be found under `/_admin/permissions/auth_token_access`
220 220 ## The list should be "," separated and on a single line.
221 221 ##
222 222 ## Most common views to enable:
223 223 # RepoCommitsView:repo_commit_download
224 224 # RepoCommitsView:repo_commit_patch
225 225 # RepoCommitsView:repo_commit_raw
226 226 # RepoCommitsView:repo_commit_raw@TOKEN
227 227 # RepoFilesView:repo_files_diff
228 228 # RepoFilesView:repo_archivefile
229 229 # RepoFilesView:repo_file_raw
230 230 # GistView:*
231 231 api_access_controllers_whitelist =
232 232
233 233 ## default encoding used to convert from and to unicode
234 234 ## can be also a comma separated list of encoding in case of mixed encodings
235 235 default_encoding = UTF-8
236 236
237 237 ## instance-id prefix
238 238 ## a prefix key for this instance used for cache invalidation when running
239 239 ## multiple instances of rhodecode, make sure it's globally unique for
240 240 ## all running rhodecode instances. Leave empty if you don't use it
241 241 instance_id =
242 242
243 243 ## Fallback authentication plugin. Set this to a plugin ID to force the usage
244 244 ## of an authentication plugin also if it is disabled by it's settings.
245 245 ## This could be useful if you are unable to log in to the system due to broken
246 246 ## authentication settings. Then you can enable e.g. the internal rhodecode auth
247 247 ## module to log in again and fix the settings.
248 248 ##
249 249 ## Available builtin plugin IDs (hash is part of the ID):
250 250 ## egg:rhodecode-enterprise-ce#rhodecode
251 251 ## egg:rhodecode-enterprise-ce#pam
252 252 ## egg:rhodecode-enterprise-ce#ldap
253 253 ## egg:rhodecode-enterprise-ce#jasig_cas
254 254 ## egg:rhodecode-enterprise-ce#headers
255 255 ## egg:rhodecode-enterprise-ce#crowd
256 256 #rhodecode.auth_plugin_fallback = egg:rhodecode-enterprise-ce#rhodecode
257 257
258 258 ## alternative return HTTP header for failed authentication. Default HTTP
259 259 ## response is 401 HTTPUnauthorized. Currently HG clients have troubles with
260 260 ## handling that causing a series of failed authentication calls.
261 261 ## Set this variable to 403 to return HTTPForbidden, or any other HTTP code
262 262 ## This will be served instead of default 401 on bad authentication
263 263 auth_ret_code =
264 264
265 265 ## use special detection method when serving auth_ret_code, instead of serving
266 266 ## ret_code directly, use 401 initially (Which triggers credentials prompt)
267 267 ## and then serve auth_ret_code to clients
268 268 auth_ret_code_detection = false
269 269
270 270 ## locking return code. When repository is locked return this HTTP code. 2XX
271 271 ## codes don't break the transactions while 4XX codes do
272 272 lock_ret_code = 423
273 273
274 274 ## allows to change the repository location in settings page
275 275 allow_repo_location_change = true
276 276
277 277 ## allows to setup custom hooks in settings page
278 278 allow_custom_hooks_settings = true
279 279
280 280 ## generated license token, goto license page in RhodeCode settings to obtain
281 281 ## new token
282 282 license_token =
283 283
284 284 ## supervisor connection uri, for managing supervisor and logs.
285 285 supervisor.uri =
286 286 ## supervisord group name/id we only want this RC instance to handle
287 287 supervisor.group_id = dev
288 288
289 289 ## Display extended labs settings
290 290 labs_settings_active = true
291 291
292 292 ####################################
293 293 ### CELERY CONFIG ####
294 294 ####################################
295 295 ## run: /path/to/celery worker \
296 296 ## -E --beat --app rhodecode.lib.celerylib.loader \
297 297 ## --scheduler rhodecode.lib.celerylib.scheduler.RcScheduler \
298 298 ## --loglevel DEBUG --ini /path/to/rhodecode.ini
299 299
300 300 use_celery = false
301 301
302 302 ## connection url to the message broker (default rabbitmq)
303 303 celery.broker_url = amqp://rabbitmq:qweqwe@localhost:5672/rabbitmqhost
304 304
305 305 ## maximum tasks to execute before worker restart
306 306 celery.max_tasks_per_child = 100
307 307
308 308 ## tasks will never be sent to the queue, but executed locally instead.
309 309 celery.task_always_eager = false
310 310
311 311 ####################################
312 312 ### BEAKER CACHE ####
313 313 ####################################
314 314 # default cache dir for templates. Putting this into a ramdisk
315 315 ## can boost performance, eg. %(here)s/data_ramdisk
316 316 cache_dir = %(here)s/data
317 317
318 318 ## locking and default file storage for Beaker. Putting this into a ramdisk
319 319 ## can boost performance, eg. %(here)s/data_ramdisk/cache/beaker_data
320 320 beaker.cache.data_dir = %(here)s/data/cache/beaker_data
321 321 beaker.cache.lock_dir = %(here)s/data/cache/beaker_lock
322 322
323 323 beaker.cache.regions = short_term, long_term, sql_cache_short, auth_plugins, repo_cache_long
324 324
325 325 # used for caching user permissions
326 326 beaker.cache.short_term.type = file
327 327 beaker.cache.short_term.expire = 0
328 328 beaker.cache.short_term.key_length = 256
329 329
330 330 beaker.cache.long_term.type = memory
331 331 beaker.cache.long_term.expire = 36000
332 332 beaker.cache.long_term.key_length = 256
333 333
334 334 beaker.cache.sql_cache_short.type = memory
335 335 beaker.cache.sql_cache_short.expire = 10
336 336 beaker.cache.sql_cache_short.key_length = 256
337 337
338 338 ## default is memory cache, configure only if required
339 339 ## using multi-node or multi-worker setup
340 340 #beaker.cache.auth_plugins.type = ext:database
341 341 #beaker.cache.auth_plugins.lock_dir = %(here)s/data/cache/auth_plugin_lock
342 342 #beaker.cache.auth_plugins.url = postgresql://postgres:secret@localhost/rhodecode
343 343 #beaker.cache.auth_plugins.url = mysql://root:secret@127.0.0.1/rhodecode
344 344 #beaker.cache.auth_plugins.sa.pool_recycle = 3600
345 345 #beaker.cache.auth_plugins.sa.pool_size = 10
346 346 #beaker.cache.auth_plugins.sa.max_overflow = 0
347 347
348 348 beaker.cache.repo_cache_long.type = memorylru_base
349 349 beaker.cache.repo_cache_long.max_items = 4096
350 350 beaker.cache.repo_cache_long.expire = 2592000
351 351
352 352 ## default is memorylru_base cache, configure only if required
353 353 ## using multi-node or multi-worker setup
354 354 #beaker.cache.repo_cache_long.type = ext:memcached
355 355 #beaker.cache.repo_cache_long.url = localhost:11211
356 356 #beaker.cache.repo_cache_long.expire = 1209600
357 357 #beaker.cache.repo_cache_long.key_length = 256
358 358
359 359 ####################################
360 360 ### BEAKER SESSION ####
361 361 ####################################
362 362
363 363 ## .session.type is type of storage options for the session, current allowed
364 364 ## types are file, ext:memcached, ext:database, and memory (default).
365 365 beaker.session.type = file
366 366 beaker.session.data_dir = %(here)s/data/sessions/data
367 367
368 368 ## db based session, fast, and allows easy management over logged in users
369 369 #beaker.session.type = ext:database
370 370 #beaker.session.table_name = db_session
371 371 #beaker.session.sa.url = postgresql://postgres:secret@localhost/rhodecode
372 372 #beaker.session.sa.url = mysql://root:secret@127.0.0.1/rhodecode
373 373 #beaker.session.sa.pool_recycle = 3600
374 374 #beaker.session.sa.echo = false
375 375
376 376 beaker.session.key = rhodecode
377 377 beaker.session.secret = develop-rc-uytcxaz
378 378 beaker.session.lock_dir = %(here)s/data/sessions/lock
379 379
380 380 ## Secure encrypted cookie. Requires AES and AES python libraries
381 381 ## you must disable beaker.session.secret to use this
382 382 #beaker.session.encrypt_key = key_for_encryption
383 383 #beaker.session.validate_key = validation_key
384 384
385 385 ## sets session as invalid (also logging out user) if it has not been
386 386 ## accessed for given amount of time in seconds
387 387 beaker.session.timeout = 2592000
388 388 beaker.session.httponly = true
389 389 ## Path to use for the cookie. Set to prefix if you use prefix middleware
390 390 #beaker.session.cookie_path = /custom_prefix
391 391
392 392 ## uncomment for https secure cookie
393 393 beaker.session.secure = false
394 394
395 395 ## auto save the session to not to use .save()
396 396 beaker.session.auto = false
397 397
398 398 ## default cookie expiration time in seconds, set to `true` to set expire
399 399 ## at browser close
400 400 #beaker.session.cookie_expires = 3600
401 401
402 402 ###################################
403 403 ## SEARCH INDEXING CONFIGURATION ##
404 404 ###################################
405 405 ## Full text search indexer is available in rhodecode-tools under
406 406 ## `rhodecode-tools index` command
407 407
408 408 ## WHOOSH Backend, doesn't require additional services to run
409 409 ## it works good with few dozen repos
410 410 search.module = rhodecode.lib.index.whoosh
411 411 search.location = %(here)s/data/index
412 412
413 413 ########################################
414 414 ### CHANNELSTREAM CONFIG ####
415 415 ########################################
416 416 ## channelstream enables persistent connections and live notification
417 417 ## in the system. It's also used by the chat system
418 418 channelstream.enabled = false
419 419
420 420 ## server address for channelstream server on the backend
421 421 channelstream.server = 127.0.0.1:9800
422 422
423 423 ## location of the channelstream server from outside world
424 424 ## use ws:// for http or wss:// for https. This address needs to be handled
425 425 ## by external HTTP server such as Nginx or Apache
426 426 ## see nginx/apache configuration examples in our docs
427 427 channelstream.ws_url = ws://rhodecode.yourserver.com/_channelstream
428 428 channelstream.secret = secret
429 429 channelstream.history.location = %(here)s/channelstream_history
430 430
431 431 ## Internal application path that Javascript uses to connect into.
432 432 ## If you use proxy-prefix the prefix should be added before /_channelstream
433 433 channelstream.proxy_path = /_channelstream
434 434
435 435
436 436 ###################################
437 437 ## APPENLIGHT CONFIG ##
438 438 ###################################
439 439
440 440 ## Appenlight is tailored to work with RhodeCode, see
441 441 ## http://appenlight.com for details how to obtain an account
442 442
443 443 ## appenlight integration enabled
444 444 appenlight = false
445 445
446 446 appenlight.server_url = https://api.appenlight.com
447 447 appenlight.api_key = YOUR_API_KEY
448 448 #appenlight.transport_config = https://api.appenlight.com?threaded=1&timeout=5
449 449
450 450 # used for JS client
451 451 appenlight.api_public_key = YOUR_API_PUBLIC_KEY
452 452
453 453 ## TWEAK AMOUNT OF INFO SENT HERE
454 454
455 455 ## enables 404 error logging (default False)
456 456 appenlight.report_404 = false
457 457
458 458 ## time in seconds after request is considered being slow (default 1)
459 459 appenlight.slow_request_time = 1
460 460
461 461 ## record slow requests in application
462 462 ## (needs to be enabled for slow datastore recording and time tracking)
463 463 appenlight.slow_requests = true
464 464
465 465 ## enable hooking to application loggers
466 466 appenlight.logging = true
467 467
468 468 ## minimum log level for log capture
469 469 appenlight.logging.level = WARNING
470 470
471 471 ## send logs only from erroneous/slow requests
472 472 ## (saves API quota for intensive logging)
473 473 appenlight.logging_on_error = false
474 474
475 475 ## list of additional keywords that should be grabbed from environ object
476 476 ## can be string with comma separated list of words in lowercase
477 477 ## (by default client will always send following info:
478 478 ## 'REMOTE_USER', 'REMOTE_ADDR', 'SERVER_NAME', 'CONTENT_TYPE' + all keys that
478 478 ## start with HTTP*); this list can be extended with additional keywords here
480 480 appenlight.environ_keys_whitelist =
481 481
482 482 ## list of keywords that should be blanked from request object
483 483 ## can be string with comma separated list of words in lowercase
484 484 ## (by default client will always blank keys that contain following words
485 485 ## 'password', 'passwd', 'pwd', 'auth_tkt', 'secret', 'csrf'
486 486 ## this list can be extended with additional keywords set here
487 487 appenlight.request_keys_blacklist =
488 488
489 489 ## list of namespaces that should be ignored when gathering log entries
490 490 ## can be string with comma separated list of namespaces
491 491 ## (by default the client ignores own entries: appenlight_client.client)
492 492 appenlight.log_namespace_blacklist =
493 493
494 494
495 495 ################################################################################
496 496 ## WARNING: *THE LINE BELOW MUST BE UNCOMMENTED ON A PRODUCTION ENVIRONMENT* ##
497 497 ## Debug mode will enable the interactive debugging tool, allowing ANYONE to ##
498 498 ## execute malicious code after an exception is raised. ##
499 499 ################################################################################
500 500 #set debug = false
501 501
502 502
503 503 ##############
504 504 ## STYLING ##
505 505 ##############
506 506 debug_style = true
507 507
508 508 ###########################################
509 509 ### MAIN RHODECODE DATABASE CONFIG ###
510 510 ###########################################
511 511 #sqlalchemy.db1.url = sqlite:///%(here)s/rhodecode.db?timeout=30
512 512 #sqlalchemy.db1.url = postgresql://postgres:qweqwe@localhost/rhodecode
513 513 #sqlalchemy.db1.url = mysql://root:qweqwe@localhost/rhodecode
514 514 sqlalchemy.db1.url = sqlite:///%(here)s/rhodecode.db?timeout=30
515 515
516 516 # see sqlalchemy docs for other advanced settings
517 517
518 518 ## print the sql statements to output
519 519 sqlalchemy.db1.echo = false
520 520 ## recycle the connections after this amount of seconds
521 521 sqlalchemy.db1.pool_recycle = 3600
522 522 sqlalchemy.db1.convert_unicode = true
523 523
524 524 ## the number of connections to keep open inside the connection pool.
525 525 ## 0 indicates no limit
526 526 #sqlalchemy.db1.pool_size = 5
527 527
528 528 ## the number of connections to allow in connection pool "overflow", that is
529 529 ## connections that can be opened above and beyond the pool_size setting,
530 530 ## which defaults to five.
531 531 #sqlalchemy.db1.max_overflow = 10
532 532
533 533
534 534 ##################
535 535 ### VCS CONFIG ###
536 536 ##################
537 537 vcs.server.enable = true
538 538 vcs.server = localhost:9900
539 539
540 540 ## Web server connectivity protocol, responsible for web based VCS operations
541 541 ## Available protocols are:
542 542 ## `http` - use http-rpc backend (default)
543 543 vcs.server.protocol = http
544 544
545 545 ## Push/Pull operations protocol, available options are:
546 546 ## `http` - use http-rpc backend (default)
547 547 ##
548 548 vcs.scm_app_implementation = http
549 549
550 550 ## Push/Pull operations hooks protocol, available options are:
551 551 ## `http` - use http-rpc backend (default)
552 552 vcs.hooks.protocol = http
553 553
554 ## Host on which this instance is listening for hooks. If vcsserver is in another location
555 ## this should be adjusted.
556 vcs.hooks.host = 127.0.0.1
557
554 558 vcs.server.log_level = debug
555 559 ## Start VCSServer with this instance as a subprocess, useful for development
556 560 vcs.start_server = false
557 561
558 562 ## List of enabled VCS backends, available options are:
559 563 ## `hg` - mercurial
560 564 ## `git` - git
561 565 ## `svn` - subversion
562 566 vcs.backends = hg, git, svn
563 567
564 568 vcs.connection_timeout = 3600
565 569 ## Compatibility version when creating SVN repositories. Defaults to newest version when commented out.
566 570 ## Available options are: pre-1.4-compatible, pre-1.5-compatible, pre-1.6-compatible, pre-1.8-compatible, pre-1.9-compatible
567 571 #vcs.svn.compatible_version = pre-1.8-compatible
568 572
569 573
570 574 ############################################################
571 575 ### Subversion proxy support (mod_dav_svn) ###
572 576 ### Maps RhodeCode repo groups into SVN paths for Apache ###
573 577 ############################################################
574 578 ## Enable or disable the config file generation.
575 579 svn.proxy.generate_config = false
576 580 ## Generate config file with `SVNListParentPath` set to `On`.
577 581 svn.proxy.list_parent_path = true
578 582 ## Set location and file name of generated config file.
579 583 svn.proxy.config_file_path = %(here)s/mod_dav_svn.conf
580 584 ## alternative mod_dav config template. This needs to be a mako template
581 585 #svn.proxy.config_template = ~/.rccontrol/enterprise-1/custom_svn_conf.mako
582 586 ## Used as a prefix to the `Location` block in the generated config file.
583 587 ## In most cases it should be set to `/`.
584 588 svn.proxy.location_root = /
585 589 ## Command to reload the mod dav svn configuration on change.
586 590 ## Example: `/etc/init.d/apache2 reload`
587 591 #svn.proxy.reload_cmd = /etc/init.d/apache2 reload
588 592 ## If the timeout expires before the reload command finishes, the command will
589 593 ## be killed. Setting it to zero means no timeout. Defaults to 10 seconds.
590 594 #svn.proxy.reload_timeout = 10
591 595
592 596 ############################################################
593 597 ### SSH Support Settings ###
594 598 ############################################################
595 599
596 600 ## Defines if a custom authorized_keys file should be created and written on
597 601 ## any change of user ssh keys. Setting this to false also disables the possibility
598 602 ## of adding SSH keys by users from web interface. Super admins can still
599 603 ## manage SSH Keys.
600 604 ssh.generate_authorized_keyfile = false
601 605
602 606 ## Options for ssh, default is `no-pty,no-port-forwarding,no-X11-forwarding,no-agent-forwarding`
603 607 # ssh.authorized_keys_ssh_opts =
604 608
605 609 ## Path to the authorized_keys file where the generated entries are placed.
606 610 ## It is possible to have multiple key files specified in `sshd_config` e.g.
607 611 ## AuthorizedKeysFile %h/.ssh/authorized_keys %h/.ssh/authorized_keys_rhodecode
608 612 ssh.authorized_keys_file_path = ~/.ssh/authorized_keys_rhodecode
609 613
610 614 ## Command to execute the SSH wrapper. The binary is available in the
611 615 ## rhodecode installation directory.
612 616 ## e.g ~/.rccontrol/community-1/profile/bin/rc-ssh-wrapper
613 617 ssh.wrapper_cmd = ~/.rccontrol/community-1/rc-ssh-wrapper
614 618
615 619 ## Allow shell when executing the ssh-wrapper command
616 620 ssh.wrapper_cmd_allow_shell = false
617 621
618 622 ## Enables logging, and detailed output send back to the client during SSH
619 623 ## operations. Useful for debugging, shouldn't be used in production.
620 624 ssh.enable_debug_logging = true
621 625
622 626 ## Paths to binary executable, by default they are the names, but we can
623 627 ## override them if we want to use a custom one
624 628 ssh.executable.hg = ~/.rccontrol/vcsserver-1/profile/bin/hg
625 629 ssh.executable.git = ~/.rccontrol/vcsserver-1/profile/bin/git
626 630 ssh.executable.svn = ~/.rccontrol/vcsserver-1/profile/bin/svnserve
627 631
628 632
629 633 ## Dummy marker to add new entries after.
630 634 ## Add any custom entries below. Please don't remove.
631 635 custom.conf = 1
632 636
633 637
634 638 ################################
635 639 ### LOGGING CONFIGURATION ####
636 640 ################################
637 641 [loggers]
638 642 keys = root, sqlalchemy, beaker, rhodecode, ssh_wrapper, celery
639 643
640 644 [handlers]
641 645 keys = console, console_sql
642 646
643 647 [formatters]
644 648 keys = generic, color_formatter, color_formatter_sql
645 649
646 650 #############
647 651 ## LOGGERS ##
648 652 #############
649 653 [logger_root]
650 654 level = NOTSET
651 655 handlers = console
652 656
653 657 [logger_sqlalchemy]
654 658 level = INFO
655 659 handlers = console_sql
656 660 qualname = sqlalchemy.engine
657 661 propagate = 0
658 662
659 663 [logger_beaker]
660 664 level = DEBUG
661 665 handlers =
662 666 qualname = beaker.container
663 667 propagate = 1
664 668
665 669 [logger_rhodecode]
666 670 level = DEBUG
667 671 handlers =
668 672 qualname = rhodecode
669 673 propagate = 1
670 674
671 675 [logger_ssh_wrapper]
672 676 level = DEBUG
673 677 handlers =
674 678 qualname = ssh_wrapper
675 679 propagate = 1
676 680
677 681 [logger_celery]
678 682 level = DEBUG
679 683 handlers =
680 684 qualname = celery
681 685
682 686
683 687 ##############
684 688 ## HANDLERS ##
685 689 ##############
686 690
687 691 [handler_console]
688 692 class = StreamHandler
689 693 args = (sys.stderr, )
690 694 level = DEBUG
691 695 formatter = color_formatter
692 696
693 697 [handler_console_sql]
694 698 class = StreamHandler
695 699 args = (sys.stderr, )
696 700 level = DEBUG
697 701 formatter = color_formatter_sql
698 702
699 703 ################
700 704 ## FORMATTERS ##
701 705 ################
702 706
703 707 [formatter_generic]
704 708 class = rhodecode.lib.logging_formatter.ExceptionAwareFormatter
705 709 format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s
706 710 datefmt = %Y-%m-%d %H:%M:%S
707 711
708 712 [formatter_color_formatter]
709 713 class = rhodecode.lib.logging_formatter.ColorFormatter
710 714 format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s
711 715 datefmt = %Y-%m-%d %H:%M:%S
712 716
713 717 [formatter_color_formatter_sql]
714 718 class = rhodecode.lib.logging_formatter.ColorFormatterSql
715 719 format = %(asctime)s.%(msecs)03d %(levelname)-5.5s [%(name)s] %(message)s
716 720 datefmt = %Y-%m-%d %H:%M:%S
@@ -1,686 +1,689 b''
1 1
2 2
3 3 ################################################################################
4 4 ## RHODECODE COMMUNITY EDITION CONFIGURATION ##
5 5 # The %(here)s variable will be replaced with the parent directory of this file#
6 6 ################################################################################
7 7
8 8 [DEFAULT]
9 9 debug = true
10 10
11 11 ################################################################################
12 12 ## EMAIL CONFIGURATION ##
13 13 ## Uncomment and replace with the email address which should receive ##
14 14 ## any error reports after an application crash ##
15 15 ## Additionally these settings will be used by the RhodeCode mailing system ##
16 16 ################################################################################
17 17
18 18 ## prefix all emails subjects with given prefix, helps filtering out emails
19 19 #email_prefix = [RhodeCode]
20 20
21 21 ## email FROM address all mails will be sent
22 22 #app_email_from = rhodecode-noreply@localhost
23 23
24 24 ## Uncomment and replace with the address which should receive any error report
25 25 ## note: using appenlight for error handling doesn't need this to be uncommented
26 26 #email_to = admin@localhost
27 27
28 28 ## in case of Application errors, send an error email
29 29 #error_email_from = rhodecode_error@localhost
30 30
31 31 ## additional error message to be sent in case of server crash
32 32 #error_message =
33 33
34 34
35 35 #smtp_server = mail.server.com
36 36 #smtp_username =
37 37 #smtp_password =
38 38 #smtp_port =
39 39 #smtp_use_tls = false
40 40 #smtp_use_ssl = true
41 41 ## Specify available auth parameters here (e.g. LOGIN PLAIN CRAM-MD5, etc.)
42 42 #smtp_auth =
43 43
44 44 [server:main]
45 45 ## COMMON ##
46 46 host = 127.0.0.1
47 47 port = 5000
48 48
49 49 ##################################
50 50 ## WAITRESS WSGI SERVER ##
51 51 ## Recommended for Development ##
52 52 ##################################
53 53
54 54 #use = egg:waitress#main
55 55 ## number of worker threads
56 56 #threads = 5
57 57 ## MAX BODY SIZE 100GB
58 58 #max_request_body_size = 107374182400
59 59 ## Use poll instead of select, fixes file descriptors limits problems.
60 60 ## May not work on old windows systems.
61 61 #asyncore_use_poll = true
62 62
63 63
64 64 ##########################
65 65 ## GUNICORN WSGI SERVER ##
66 66 ##########################
67 67 ## run with gunicorn --log-config rhodecode.ini --paste rhodecode.ini
68 68
69 69 use = egg:gunicorn#main
70 70 ## Sets the number of process workers. You must set `instance_id = *`
71 71 ## when this option is set to more than one worker, recommended
72 72 ## value is (2 * NUMBER_OF_CPUS + 1), eg 2CPU = 5 workers
73 73 ## The `instance_id = *` must be set in the [app:main] section below
74 74 workers = 2
75 75 ## number of threads for each of the worker, must be set to 1 for gevent
76 76 ## generally recommended to be at 1
77 77 #threads = 1
78 78 ## process name
79 79 proc_name = rhodecode
80 80 ## type of worker class, one of sync, gevent
81 81 ## recommended for bigger setups is using one other than sync
82 82 worker_class = gevent
83 83 ## The maximum number of simultaneous clients. Valid only for Gevent
84 84 #worker_connections = 10
85 85 ## max number of requests that worker will handle before being gracefully
86 86 ## restarted, could prevent memory leaks
87 87 max_requests = 1000
88 88 max_requests_jitter = 30
89 89 ## amount of time a worker can spend with handling a request before it
90 90 ## gets killed and restarted. Set to 6hrs
91 91 timeout = 21600
92 92
93 93
94 94 ## prefix middleware for RhodeCode.
95 95 ## recommended when using proxy setup.
96 96 ## allows to set RhodeCode under a prefix in server.
97 97 ## eg https://server.com/custom_prefix. Enable `filter-with =` option below as well.
98 98 ## And set your prefix like: `prefix = /custom_prefix`
99 99 ## be sure to also set beaker.session.cookie_path = /custom_prefix if you need
100 100 ## to make your cookies only work on prefix url
101 101 [filter:proxy-prefix]
102 102 use = egg:PasteDeploy#prefix
103 103 prefix = /
104 104
105 105 [app:main]
106 106 use = egg:rhodecode-enterprise-ce
107 107
108 108 ## enable proxy prefix middleware, defined above
109 109 #filter-with = proxy-prefix
110 110
111 111 ## encryption key used to encrypt social plugin tokens,
112 112 ## remote_urls with credentials etc, if not set it defaults to
113 113 ## `beaker.session.secret`
114 114 #rhodecode.encrypted_values.secret =
115 115
116 116 ## decryption strict mode (enabled by default). It controls if decryption raises
117 117 ## `SignatureVerificationError` in case of wrong key, or damaged encryption data.
118 118 #rhodecode.encrypted_values.strict = false
119 119
120 120 ## return gzipped responses from Rhodecode (static files/application)
121 121 gzip_responses = false
122 122
123 123 ## autogenerate javascript routes file on startup
124 124 generate_js_files = false
125 125
126 126 ## Optional Languages
127 127 ## en(default), be, de, es, fr, it, ja, pl, pt, ru, zh
128 128 lang = en
129 129
130 130 ## perform a full repository scan on each server start, this should be
131 131 ## set to false after first startup, to allow faster server restarts.
132 132 startup.import_repos = false
133 133
134 134 ## Uncomment and set this path to use archive download cache.
135 135 ## Once enabled, generated archives will be cached at this location
136 136 ## and served from the cache during subsequent requests for the same archive of
137 137 ## the repository.
138 138 #archive_cache_dir = /tmp/tarballcache
139 139
140 140 ## URL at which the application is running. This is used for bootstrapping
141 141 ## requests in context when no web request is available. Used in ishell, or
142 142 ## SSH calls. Set this for events to receive proper url for SSH calls.
143 143 app.base_url = http://rhodecode.local
144 144
145 145 ## change this to unique ID for security
146 146 app_instance_uuid = rc-production
147 147
148 148 ## cut off limit for large diffs (size in bytes). If overall diff size on
149 149 ## commit, or pull request exceeds this limit this diff will be displayed
150 150 ## partially. E.g 512000 == 512Kb
151 151 cut_off_limit_diff = 512000
152 152
153 153 ## cut off limit for large files inside diffs (size in bytes). Each individual
154 154 ## file inside diff which exceeds this limit will be displayed partially.
155 155 ## E.g 128000 == 128Kb
156 156 cut_off_limit_file = 128000
157 157
158 158 ## use cache version of scm repo everywhere
159 159 vcs_full_cache = true
160 160
161 161 ## force https in RhodeCode, fixes https redirects, assumes it's always https
162 162 ## Normally this is controlled by proper http flags sent from http server
163 163 force_https = false
164 164
165 165 ## use Strict-Transport-Security headers
166 166 use_htsts = false
167 167
168 168 ## git rev filter option, --all is the default filter, if you need to
169 169 ## hide all refs in changelog switch this to --branches --tags
170 170 git_rev_filter = --branches --tags
171 171
172 172 # Set to true if your repos are exposed using the dumb protocol
173 173 git_update_server_info = false
174 174
175 175 ## RSS/ATOM feed options
176 176 rss_cut_off_limit = 256000
177 177 rss_items_per_page = 10
178 178 rss_include_diff = false
179 179
180 180 ## gist URL alias, used to create nicer urls for gist. This should be an
181 181 ## url that does rewrites to _admin/gists/{gistid}.
182 182 ## example: http://gist.rhodecode.org/{gistid}. Empty means use the internal
183 183 ## RhodeCode url, ie. http[s]://rhodecode.server/_admin/gists/{gistid}
184 184 gist_alias_url =
185 185
186 186 ## List of views (using glob pattern syntax) that AUTH TOKENS could be
187 187 ## used for access.
188 188 ## Adding ?auth_token=TOKEN_HASH to the url authenticates this request as if it
189 189 ## came from the logged in user who owns this authentication token.
190 190 ## Additionally @TOKEN syntax can be used to bind the view to a specific
191 191 ## authentication token. Such view would be only accessible when used together
192 192 ## with this authentication token
193 193 ##
194 194 ## list of all views can be found under `/_admin/permissions/auth_token_access`
195 195 ## The list should be "," separated and on a single line.
196 196 ##
197 197 ## Most common views to enable:
198 198 # RepoCommitsView:repo_commit_download
199 199 # RepoCommitsView:repo_commit_patch
200 200 # RepoCommitsView:repo_commit_raw
201 201 # RepoCommitsView:repo_commit_raw@TOKEN
202 202 # RepoFilesView:repo_files_diff
203 203 # RepoFilesView:repo_archivefile
204 204 # RepoFilesView:repo_file_raw
205 205 # GistView:*
206 206 api_access_controllers_whitelist =
207 207
208 208 ## default encoding used to convert from and to unicode
209 209 ## can be also a comma separated list of encoding in case of mixed encodings
210 210 default_encoding = UTF-8
211 211
212 212 ## instance-id prefix
213 213 ## a prefix key for this instance used for cache invalidation when running
214 214 ## multiple instances of rhodecode, make sure it's globally unique for
215 215 ## all running rhodecode instances. Leave empty if you don't use it
216 216 instance_id =
217 217
218 218 ## Fallback authentication plugin. Set this to a plugin ID to force the usage
219 219 ## of an authentication plugin also if it is disabled by it's settings.
220 220 ## This could be useful if you are unable to log in to the system due to broken
221 221 ## authentication settings. Then you can enable e.g. the internal rhodecode auth
222 222 ## module to log in again and fix the settings.
223 223 ##
224 224 ## Available builtin plugin IDs (hash is part of the ID):
225 225 ## egg:rhodecode-enterprise-ce#rhodecode
226 226 ## egg:rhodecode-enterprise-ce#pam
227 227 ## egg:rhodecode-enterprise-ce#ldap
228 228 ## egg:rhodecode-enterprise-ce#jasig_cas
229 229 ## egg:rhodecode-enterprise-ce#headers
230 230 ## egg:rhodecode-enterprise-ce#crowd
231 231 #rhodecode.auth_plugin_fallback = egg:rhodecode-enterprise-ce#rhodecode
232 232
233 233 ## alternative return HTTP header for failed authentication. Default HTTP
234 234 ## response is 401 HTTPUnauthorized. Currently HG clients have troubles with
235 235 ## handling that causing a series of failed authentication calls.
236 236 ## Set this variable to 403 to return HTTPForbidden, or any other HTTP code
237 237 ## This will be served instead of default 401 on bad authentication
238 238 auth_ret_code =
239 239
240 240 ## use special detection method when serving auth_ret_code, instead of serving
241 241 ## ret_code directly, use 401 initially (Which triggers credentials prompt)
242 242 ## and then serve auth_ret_code to clients
243 243 auth_ret_code_detection = false
244 244
245 245 ## locking return code. When repository is locked return this HTTP code. 2XX
246 246 ## codes don't break the transactions while 4XX codes do
247 247 lock_ret_code = 423
248 248
249 249 ## allows to change the repository location in settings page
250 250 allow_repo_location_change = true
251 251
252 252 ## allows to setup custom hooks in settings page
253 253 allow_custom_hooks_settings = true
254 254
255 255 ## generated license token, goto license page in RhodeCode settings to obtain
256 256 ## new token
257 257 license_token =
258 258
259 259 ## supervisor connection uri, for managing supervisor and logs.
260 260 supervisor.uri =
261 261 ## supervisord group name/id we only want this RC instance to handle
262 262 supervisor.group_id = prod
263 263
264 264 ## Display extended labs settings
265 265 labs_settings_active = true
266 266
267 267 ####################################
268 268 ### CELERY CONFIG ####
269 269 ####################################
270 270 ## run: /path/to/celery worker \
271 271 ## -E --beat --app rhodecode.lib.celerylib.loader \
272 272 ## --scheduler rhodecode.lib.celerylib.scheduler.RcScheduler \
273 273 ## --loglevel DEBUG --ini /path/to/rhodecode.ini
274 274
275 275 use_celery = false
276 276
277 277 ## connection url to the message broker (default rabbitmq)
278 278 celery.broker_url = amqp://rabbitmq:qweqwe@localhost:5672/rabbitmqhost
279 279
280 280 ## maximum tasks to execute before worker restart
281 281 celery.max_tasks_per_child = 100
282 282
283 283 ## tasks will never be sent to the queue, but executed locally instead.
284 284 celery.task_always_eager = false
285 285
286 286 ####################################
287 287 ### BEAKER CACHE ####
288 288 ####################################
289 289 # default cache dir for templates. Putting this into a ramdisk
290 290 ## can boost performance, eg. %(here)s/data_ramdisk
291 291 cache_dir = %(here)s/data
292 292
293 293 ## locking and default file storage for Beaker. Putting this into a ramdisk
294 294 ## can boost performance, eg. %(here)s/data_ramdisk/cache/beaker_data
295 295 beaker.cache.data_dir = %(here)s/data/cache/beaker_data
296 296 beaker.cache.lock_dir = %(here)s/data/cache/beaker_lock
297 297
298 298 beaker.cache.regions = short_term, long_term, sql_cache_short, auth_plugins, repo_cache_long
299 299
300 300 # used for caching user permissions
301 301 beaker.cache.short_term.type = file
302 302 beaker.cache.short_term.expire = 0
303 303 beaker.cache.short_term.key_length = 256
304 304
305 305 beaker.cache.long_term.type = memory
306 306 beaker.cache.long_term.expire = 36000
307 307 beaker.cache.long_term.key_length = 256
308 308
309 309 beaker.cache.sql_cache_short.type = memory
310 310 beaker.cache.sql_cache_short.expire = 10
311 311 beaker.cache.sql_cache_short.key_length = 256
312 312
313 313 ## default is memory cache, configure only if required
314 314 ## using multi-node or multi-worker setup
315 315 #beaker.cache.auth_plugins.type = ext:database
316 316 #beaker.cache.auth_plugins.lock_dir = %(here)s/data/cache/auth_plugin_lock
317 317 #beaker.cache.auth_plugins.url = postgresql://postgres:secret@localhost/rhodecode
318 318 #beaker.cache.auth_plugins.url = mysql://root:secret@127.0.0.1/rhodecode
319 319 #beaker.cache.auth_plugins.sa.pool_recycle = 3600
320 320 #beaker.cache.auth_plugins.sa.pool_size = 10
321 321 #beaker.cache.auth_plugins.sa.max_overflow = 0
322 322
323 323 beaker.cache.repo_cache_long.type = memorylru_base
324 324 beaker.cache.repo_cache_long.max_items = 4096
325 325 beaker.cache.repo_cache_long.expire = 2592000
326 326
327 327 ## default is memorylru_base cache, configure only if required
328 328 ## using multi-node or multi-worker setup
329 329 #beaker.cache.repo_cache_long.type = ext:memcached
330 330 #beaker.cache.repo_cache_long.url = localhost:11211
331 331 #beaker.cache.repo_cache_long.expire = 1209600
332 332 #beaker.cache.repo_cache_long.key_length = 256
333 333
334 334 ####################################
335 335 ### BEAKER SESSION ####
336 336 ####################################
337 337
338 338 ## .session.type is type of storage options for the session, current allowed
339 339 ## types are file, ext:memcached, ext:database, and memory (default).
340 340 beaker.session.type = file
341 341 beaker.session.data_dir = %(here)s/data/sessions/data
342 342
343 343 ## db based session, fast, and allows easy management over logged in users
344 344 #beaker.session.type = ext:database
345 345 #beaker.session.table_name = db_session
346 346 #beaker.session.sa.url = postgresql://postgres:secret@localhost/rhodecode
347 347 #beaker.session.sa.url = mysql://root:secret@127.0.0.1/rhodecode
348 348 #beaker.session.sa.pool_recycle = 3600
349 349 #beaker.session.sa.echo = false
350 350
351 351 beaker.session.key = rhodecode
352 352 beaker.session.secret = production-rc-uytcxaz
353 353 beaker.session.lock_dir = %(here)s/data/sessions/lock
354 354
355 355 ## Secure encrypted cookie. Requires AES and AES python libraries
356 356 ## you must disable beaker.session.secret to use this
357 357 #beaker.session.encrypt_key = key_for_encryption
358 358 #beaker.session.validate_key = validation_key
359 359
360 360 ## sets session as invalid(also logging out user) if it haven not been
361 361 ## accessed for given amount of time in seconds
362 362 beaker.session.timeout = 2592000
363 363 beaker.session.httponly = true
364 364 ## Path to use for the cookie. Set to prefix if you use prefix middleware
365 365 #beaker.session.cookie_path = /custom_prefix
366 366
367 367 ## uncomment for https secure cookie
368 368 beaker.session.secure = false
369 369
370 370 ## auto save the session to not to use .save()
371 371 beaker.session.auto = false
372 372
373 373 ## default cookie expiration time in seconds, set to `true` to set expire
374 374 ## at browser close
375 375 #beaker.session.cookie_expires = 3600
376 376
377 377 ###################################
378 378 ## SEARCH INDEXING CONFIGURATION ##
379 379 ###################################
380 380 ## Full text search indexer is available in rhodecode-tools under
381 381 ## `rhodecode-tools index` command
382 382
383 383 ## WHOOSH Backend, doesn't require additional services to run
384 384 ## it works good with few dozen repos
385 385 search.module = rhodecode.lib.index.whoosh
386 386 search.location = %(here)s/data/index
387 387
388 388 ########################################
389 389 ### CHANNELSTREAM CONFIG ####
390 390 ########################################
391 391 ## channelstream enables persistent connections and live notification
392 392 ## in the system. It's also used by the chat system
393 393 channelstream.enabled = false
394 394
395 395 ## server address for channelstream server on the backend
396 396 channelstream.server = 127.0.0.1:9800
397 397
398 398 ## location of the channelstream server from outside world
399 399 ## use ws:// for http or wss:// for https. This address needs to be handled
400 400 ## by external HTTP server such as Nginx or Apache
401 401 ## see nginx/apache configuration examples in our docs
402 402 channelstream.ws_url = ws://rhodecode.yourserver.com/_channelstream
403 403 channelstream.secret = secret
404 404 channelstream.history.location = %(here)s/channelstream_history
405 405
406 406 ## Internal application path that Javascript uses to connect into.
407 407 ## If you use proxy-prefix the prefix should be added before /_channelstream
408 408 channelstream.proxy_path = /_channelstream
409 409
410 410
411 411 ###################################
412 412 ## APPENLIGHT CONFIG ##
413 413 ###################################
414 414
415 415 ## Appenlight is tailored to work with RhodeCode, see
416 416 ## http://appenlight.com for details how to obtain an account
417 417
418 418 ## appenlight integration enabled
419 419 appenlight = false
420 420
421 421 appenlight.server_url = https://api.appenlight.com
422 422 appenlight.api_key = YOUR_API_KEY
423 423 #appenlight.transport_config = https://api.appenlight.com?threaded=1&timeout=5
424 424
425 425 # used for JS client
426 426 appenlight.api_public_key = YOUR_API_PUBLIC_KEY
427 427
428 428 ## TWEAK AMOUNT OF INFO SENT HERE
429 429
430 430 ## enables 404 error logging (default False)
431 431 appenlight.report_404 = false
432 432
433 433 ## time in seconds after request is considered being slow (default 1)
434 434 appenlight.slow_request_time = 1
435 435
436 436 ## record slow requests in application
437 437 ## (needs to be enabled for slow datastore recording and time tracking)
438 438 appenlight.slow_requests = true
439 439
440 440 ## enable hooking to application loggers
441 441 appenlight.logging = true
442 442
443 443 ## minimum log level for log capture
444 444 appenlight.logging.level = WARNING
445 445
446 446 ## send logs only from erroneous/slow requests
447 447 ## (saves API quota for intensive logging)
448 448 appenlight.logging_on_error = false
449 449
450 450 ## list of additional keywords that should be grabbed from environ object
451 451 ## can be string with comma separated list of words in lowercase
452 452 ## (by default client will always send following info:
453 453 ## 'REMOTE_USER', 'REMOTE_ADDR', 'SERVER_NAME', 'CONTENT_TYPE' + all keys that
454 454 ## start with HTTP* this list can be extended with additional keywords here
455 455 appenlight.environ_keys_whitelist =
456 456
457 457 ## list of keywords that should be blanked from request object
458 458 ## can be string with comma separated list of words in lowercase
459 459 ## (by default client will always blank keys that contain following words
460 460 ## 'password', 'passwd', 'pwd', 'auth_tkt', 'secret', 'csrf'
461 461 ## this list can be extended with additional keywords set here
462 462 appenlight.request_keys_blacklist =
463 463
464 464 ## list of namespaces that should be ignores when gathering log entries
465 465 ## can be string with comma separated list of namespaces
466 466 ## (by default the client ignores own entries: appenlight_client.client)
467 467 appenlight.log_namespace_blacklist =
468 468
469 469
470 470 ################################################################################
471 471 ## WARNING: *THE LINE BELOW MUST BE UNCOMMENTED ON A PRODUCTION ENVIRONMENT* ##
472 472 ## Debug mode will enable the interactive debugging tool, allowing ANYONE to ##
473 473 ## execute malicious code after an exception is raised. ##
474 474 ################################################################################
475 475 set debug = false
476 476
477 477
478 478 ###########################################
479 479 ### MAIN RHODECODE DATABASE CONFIG ###
480 480 ###########################################
481 481 #sqlalchemy.db1.url = sqlite:///%(here)s/rhodecode.db?timeout=30
482 482 #sqlalchemy.db1.url = postgresql://postgres:qweqwe@localhost/rhodecode
483 483 #sqlalchemy.db1.url = mysql://root:qweqwe@localhost/rhodecode
484 484 sqlalchemy.db1.url = postgresql://postgres:qweqwe@localhost/rhodecode
485 485
486 486 # see sqlalchemy docs for other advanced settings
487 487
488 488 ## print the sql statements to output
489 489 sqlalchemy.db1.echo = false
490 490 ## recycle the connections after this amount of seconds
491 491 sqlalchemy.db1.pool_recycle = 3600
492 492 sqlalchemy.db1.convert_unicode = true
493 493
494 494 ## the number of connections to keep open inside the connection pool.
495 495 ## 0 indicates no limit
496 496 #sqlalchemy.db1.pool_size = 5
497 497
498 498 ## the number of connections to allow in connection pool "overflow", that is
499 499 ## connections that can be opened above and beyond the pool_size setting,
500 500 ## which defaults to five.
501 501 #sqlalchemy.db1.max_overflow = 10
502 502
503 503
504 504 ##################
505 505 ### VCS CONFIG ###
506 506 ##################
507 507 vcs.server.enable = true
508 508 vcs.server = localhost:9900
509 509
510 510 ## Web server connectivity protocol, responsible for web based VCS operations
511 511 ## Available protocols are:
512 512 ## `http` - use http-rpc backend (default)
513 513 vcs.server.protocol = http
514 514
515 515 ## Push/Pull operations protocol, available options are:
516 516 ## `http` - use http-rpc backend (default)
517 517 ##
518 518 vcs.scm_app_implementation = http
519 519
520 520 ## Push/Pull operations hooks protocol, available options are:
521 521 ## `http` - use http-rpc backend (default)
522 522 vcs.hooks.protocol = http
523 ## Host on which this instance is listening for hooks. If vcsserver is in other location
524 ## this should be adjusted.
525 vcs.hooks.host = 127.0.0.1
523 526
524 527 vcs.server.log_level = info
525 528 ## Start VCSServer with this instance as a subprocess, useful for development
526 529 vcs.start_server = false
527 530
528 531 ## List of enabled VCS backends, available options are:
529 532 ## `hg` - mercurial
530 533 ## `git` - git
531 534 ## `svn` - subversion
532 535 vcs.backends = hg, git, svn
533 536
534 537 vcs.connection_timeout = 3600
535 538 ## Compatibility version when creating SVN repositories. Defaults to newest version when commented out.
536 539 ## Available options are: pre-1.4-compatible, pre-1.5-compatible, pre-1.6-compatible, pre-1.8-compatible, pre-1.9-compatible
537 540 #vcs.svn.compatible_version = pre-1.8-compatible
538 541
539 542
540 543 ############################################################
541 544 ### Subversion proxy support (mod_dav_svn) ###
542 545 ### Maps RhodeCode repo groups into SVN paths for Apache ###
543 546 ############################################################
544 547 ## Enable or disable the config file generation.
545 548 svn.proxy.generate_config = false
546 549 ## Generate config file with `SVNListParentPath` set to `On`.
547 550 svn.proxy.list_parent_path = true
548 551 ## Set location and file name of generated config file.
549 552 svn.proxy.config_file_path = %(here)s/mod_dav_svn.conf
550 553 ## alternative mod_dav config template. This needs to be a mako template
551 554 #svn.proxy.config_template = ~/.rccontrol/enterprise-1/custom_svn_conf.mako
552 555 ## Used as a prefix to the `Location` block in the generated config file.
553 556 ## In most cases it should be set to `/`.
554 557 svn.proxy.location_root = /
555 558 ## Command to reload the mod dav svn configuration on change.
556 559 ## Example: `/etc/init.d/apache2 reload`
557 560 #svn.proxy.reload_cmd = /etc/init.d/apache2 reload
558 561 ## If the timeout expires before the reload command finishes, the command will
559 562 ## be killed. Setting it to zero means no timeout. Defaults to 10 seconds.
560 563 #svn.proxy.reload_timeout = 10
561 564
562 565 ############################################################
563 566 ### SSH Support Settings ###
564 567 ############################################################
565 568
566 569 ## Defines if a custom authorized_keys file should be created and written on
567 570 ## any change of user ssh keys. Setting this to false also disables the possibility
568 571 ## of adding SSH keys by users from web interface. Super admins can still
569 572 ## manage SSH Keys.
570 573 ssh.generate_authorized_keyfile = false
571 574
572 575 ## Options for ssh, default is `no-pty,no-port-forwarding,no-X11-forwarding,no-agent-forwarding`
573 576 # ssh.authorized_keys_ssh_opts =
574 577
575 578 ## Path to the authorized_keys file where the generated entries are placed.
576 579 ## It is possible to have multiple key files specified in `sshd_config` e.g.
577 580 ## AuthorizedKeysFile %h/.ssh/authorized_keys %h/.ssh/authorized_keys_rhodecode
578 581 ssh.authorized_keys_file_path = ~/.ssh/authorized_keys_rhodecode
579 582
580 583 ## Command to execute the SSH wrapper. The binary is available in the
581 584 ## rhodecode installation directory.
582 585 ## e.g ~/.rccontrol/community-1/profile/bin/rc-ssh-wrapper
583 586 ssh.wrapper_cmd = ~/.rccontrol/community-1/rc-ssh-wrapper
584 587
585 588 ## Allow shell when executing the ssh-wrapper command
586 589 ssh.wrapper_cmd_allow_shell = false
587 590
588 591 ## Enables logging, and detailed output send back to the client during SSH
589 592 ## operations. Useful for debugging, shouldn't be used in production.
590 593 ssh.enable_debug_logging = false
591 594
592 595 ## Paths to binary executable, by default they are the names, but we can
593 596 ## override them if we want to use a custom one
594 597 ssh.executable.hg = ~/.rccontrol/vcsserver-1/profile/bin/hg
595 598 ssh.executable.git = ~/.rccontrol/vcsserver-1/profile/bin/git
596 599 ssh.executable.svn = ~/.rccontrol/vcsserver-1/profile/bin/svnserve
597 600
598 601
599 602 ## Dummy marker to add new entries after.
600 603 ## Add any custom entries below. Please don't remove.
601 604 custom.conf = 1
602 605
603 606
604 607 ################################
605 608 ### LOGGING CONFIGURATION ####
606 609 ################################
607 610 [loggers]
608 611 keys = root, sqlalchemy, beaker, rhodecode, ssh_wrapper, celery
609 612
610 613 [handlers]
611 614 keys = console, console_sql
612 615
613 616 [formatters]
614 617 keys = generic, color_formatter, color_formatter_sql
615 618
616 619 #############
617 620 ## LOGGERS ##
618 621 #############
619 622 [logger_root]
620 623 level = NOTSET
621 624 handlers = console
622 625
623 626 [logger_sqlalchemy]
624 627 level = INFO
625 628 handlers = console_sql
626 629 qualname = sqlalchemy.engine
627 630 propagate = 0
628 631
629 632 [logger_beaker]
630 633 level = DEBUG
631 634 handlers =
632 635 qualname = beaker.container
633 636 propagate = 1
634 637
635 638 [logger_rhodecode]
636 639 level = DEBUG
637 640 handlers =
638 641 qualname = rhodecode
639 642 propagate = 1
640 643
641 644 [logger_ssh_wrapper]
642 645 level = DEBUG
643 646 handlers =
644 647 qualname = ssh_wrapper
645 648 propagate = 1
646 649
647 650 [logger_celery]
648 651 level = DEBUG
649 652 handlers =
650 653 qualname = celery
651 654
652 655
653 656 ##############
654 657 ## HANDLERS ##
655 658 ##############
656 659
657 660 [handler_console]
658 661 class = StreamHandler
659 662 args = (sys.stderr, )
660 663 level = INFO
661 664 formatter = generic
662 665
663 666 [handler_console_sql]
664 667 class = StreamHandler
665 668 args = (sys.stderr, )
666 669 level = WARN
667 670 formatter = generic
668 671
669 672 ################
670 673 ## FORMATTERS ##
671 674 ################
672 675
673 676 [formatter_generic]
674 677 class = rhodecode.lib.logging_formatter.ExceptionAwareFormatter
675 678 format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s
676 679 datefmt = %Y-%m-%d %H:%M:%S
677 680
678 681 [formatter_color_formatter]
679 682 class = rhodecode.lib.logging_formatter.ColorFormatter
680 683 format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s
681 684 datefmt = %Y-%m-%d %H:%M:%S
682 685
683 686 [formatter_color_formatter_sql]
684 687 class = rhodecode.lib.logging_formatter.ColorFormatterSql
685 688 format = %(asctime)s.%(msecs)03d %(levelname)-5.5s [%(name)s] %(message)s
686 689 datefmt = %Y-%m-%d %H:%M:%S
@@ -1,150 +1,151 b''
1 1 # -*- coding: utf-8 -*-
2 2
3 3 # Copyright (C) 2016-2018 RhodeCode GmbH
4 4 #
5 5 # This program is free software: you can redistribute it and/or modify
6 6 # it under the terms of the GNU Affero General Public License, version 3
7 7 # (only), as published by the Free Software Foundation.
8 8 #
9 9 # This program is distributed in the hope that it will be useful,
10 10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 12 # GNU General Public License for more details.
13 13 #
14 14 # You should have received a copy of the GNU Affero General Public License
15 15 # along with this program. If not, see <http://www.gnu.org/licenses/>.
16 16 #
17 17 # This program is dual-licensed. If you wish to learn more about the
18 18 # RhodeCode Enterprise Edition, including its added features, Support services,
19 19 # and proprietary license terms, please see https://rhodecode.com/licenses/
20 20
21 21 import os
22 22 import sys
23 23 import json
24 24 import logging
25 25
26 26 from rhodecode.lib.hooks_daemon import prepare_callback_daemon
27 27 from rhodecode.lib.vcs.conf import settings as vcs_settings
28 28 from rhodecode.model.scm import ScmModel
29 29
30 30 log = logging.getLogger(__name__)
31 31
32 32
33 33 class VcsServer(object):
34 34 _path = None # set executable path for hg/git/svn binary
35 35 backend = None # set in child classes
36 36 tunnel = None # subprocess handling tunnel
37 37 write_perms = ['repository.admin', 'repository.write']
38 38 read_perms = ['repository.read', 'repository.admin', 'repository.write']
39 39
40 40 def __init__(self, user, user_permissions, config, env):
41 41 self.user = user
42 42 self.user_permissions = user_permissions
43 43 self.config = config
44 44 self.env = env
45 45 self.stdin = sys.stdin
46 46
47 47 self.repo_name = None
48 48 self.repo_mode = None
49 49 self.store = ''
50 50 self.ini_path = ''
51 51
52 52 def _invalidate_cache(self, repo_name):
53 53 """
54 54 Set's cache for this repository for invalidation on next access
55 55
56 56 :param repo_name: full repo name, also a cache key
57 57 """
58 58 ScmModel().mark_for_invalidation(repo_name)
59 59
60 60 def has_write_perm(self):
61 61 permission = self.user_permissions.get(self.repo_name)
62 62 if permission in ['repository.write', 'repository.admin']:
63 63 return True
64 64
65 65 return False
66 66
67 67 def _check_permissions(self, action):
68 68 permission = self.user_permissions.get(self.repo_name)
69 69 log.debug(
70 70 'permission for %s on %s are: %s',
71 71 self.user, self.repo_name, permission)
72 72
73 73 if action == 'pull':
74 74 if permission in self.read_perms:
75 75 log.info(
76 76 'READ Permissions for User "%s" detected to repo "%s"!',
77 77 self.user, self.repo_name)
78 78 return 0
79 79 else:
80 80 if permission in self.write_perms:
81 81 log.info(
82 82 'WRITE+ Permissions for User "%s" detected to repo "%s"!',
83 83 self.user, self.repo_name)
84 84 return 0
85 85
86 86 log.error('Cannot properly fetch or allow user %s permissions. '
87 87 'Return value is: %s, req action: %s',
88 88 self.user, permission, action)
89 89 return -2
90 90
91 91 def update_environment(self, action, extras=None):
92 92
93 93 scm_data = {
94 94 'ip': os.environ['SSH_CLIENT'].split()[0],
95 95 'username': self.user.username,
96 96 'user_id': self.user.user_id,
97 97 'action': action,
98 98 'repository': self.repo_name,
99 99 'scm': self.backend,
100 100 'config': self.ini_path,
101 101 'make_lock': None,
102 102 'locked_by': [None, None],
103 103 'server_url': None,
104 104 'is_shadow_repo': False,
105 105 'hooks_module': 'rhodecode.lib.hooks_daemon',
106 106 'hooks': ['push', 'pull'],
107 107 'SSH': True,
108 108 'SSH_PERMISSIONS': self.user_permissions.get(self.repo_name)
109 109 }
110 110 if extras:
111 111 scm_data.update(extras)
112 112 os.putenv("RC_SCM_DATA", json.dumps(scm_data))
113 113
114 114 def get_root_store(self):
115 115 root_store = self.store
116 116 if not root_store.endswith('/'):
117 117 # always append trailing slash
118 118 root_store = root_store + '/'
119 119 return root_store
120 120
121 121 def _handle_tunnel(self, extras):
122 122 # pre-auth
123 123 action = 'pull'
124 124 exit_code = self._check_permissions(action)
125 125 if exit_code:
126 126 return exit_code, False
127 127
128 128 req = self.env['request']
129 129 server_url = req.host_url + req.script_name
130 130 extras['server_url'] = server_url
131 131
132 132 log.debug('Using %s binaries from path %s', self.backend, self._path)
133 133 exit_code = self.tunnel.run(extras)
134 134
135 135 return exit_code, action == "push"
136 136
137 137 def run(self):
138 138 extras = {}
139 139
140 140 callback_daemon, extras = prepare_callback_daemon(
141 141 extras, protocol=vcs_settings.HOOKS_PROTOCOL,
142 host=vcs_settings.HOOKS_HOST,
142 143 use_direct_calls=False)
143 144
144 145 with callback_daemon:
145 146 try:
146 147 return self._handle_tunnel(extras)
147 148 finally:
148 149 log.debug('Running cleanup with cache invalidation')
149 150 if self.repo_name:
150 151 self._invalidate_cache(self.repo_name)
@@ -1,460 +1,461 b''
1 1 # -*- coding: utf-8 -*-
2 2
3 3 # Copyright (C) 2010-2018 RhodeCode GmbH
4 4 #
5 5 # This program is free software: you can redistribute it and/or modify
6 6 # it under the terms of the GNU Affero General Public License, version 3
7 7 # (only), as published by the Free Software Foundation.
8 8 #
9 9 # This program is distributed in the hope that it will be useful,
10 10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 12 # GNU General Public License for more details.
13 13 #
14 14 # You should have received a copy of the GNU Affero General Public License
15 15 # along with this program. If not, see <http://www.gnu.org/licenses/>.
16 16 #
17 17 # This program is dual-licensed. If you wish to learn more about the
18 18 # RhodeCode Enterprise Edition, including its added features, Support services,
19 19 # and proprietary license terms, please see https://rhodecode.com/licenses/
20 20
21 21 import os
22 22 import logging
23 23 import traceback
24 24 import collections
25 25
26 26 from paste.gzipper import make_gzip_middleware
27 27 from pyramid.wsgi import wsgiapp
28 28 from pyramid.authorization import ACLAuthorizationPolicy
29 29 from pyramid.config import Configurator
30 30 from pyramid.settings import asbool, aslist
31 31 from pyramid.httpexceptions import (
32 32 HTTPException, HTTPError, HTTPInternalServerError, HTTPFound, HTTPNotFound)
33 33 from pyramid.events import ApplicationCreated
34 34 from pyramid.renderers import render_to_response
35 35
36 36 from rhodecode.model import meta
37 37 from rhodecode.config import patches
38 38 from rhodecode.config import utils as config_utils
39 39 from rhodecode.config.environment import load_pyramid_environment
40 40
41 41 from rhodecode.lib.middleware.vcs import VCSMiddleware
42 42 from rhodecode.lib.request import Request
43 43 from rhodecode.lib.vcs import VCSCommunicationError
44 44 from rhodecode.lib.exceptions import VCSServerUnavailable
45 45 from rhodecode.lib.middleware.appenlight import wrap_in_appenlight_if_enabled
46 46 from rhodecode.lib.middleware.https_fixup import HttpsFixup
47 47 from rhodecode.lib.celerylib.loader import configure_celery
48 48 from rhodecode.lib.plugins.utils import register_rhodecode_plugin
49 49 from rhodecode.lib.utils2 import aslist as rhodecode_aslist, AttributeDict
50 50 from rhodecode.subscribers import (
51 51 scan_repositories_if_enabled, write_js_routes_if_enabled,
52 52 write_metadata_if_needed, inject_app_settings)
53 53
54 54
55 55 log = logging.getLogger(__name__)
56 56
57 57
58 58 def is_http_error(response):
59 59 # error which should have traceback
60 60 return response.status_code > 499
61 61
62 62
63 63 def make_pyramid_app(global_config, **settings):
64 64 """
65 65 Constructs the WSGI application based on Pyramid.
66 66
67 67 Specials:
68 68
69 69 * The application can also be integrated like a plugin via the call to
70 70 `includeme`. This is accompanied with the other utility functions which
71 71 are called. Changing this should be done with great care to not break
72 72 cases when these fragments are assembled from another place.
73 73
74 74 """
75 75
76 76 # Allows to use format style "{ENV_NAME}" placeholders in the configuration. It
77 77 # will be replaced by the value of the environment variable "NAME" in this case.
78 78 environ = {
79 79 'ENV_{}'.format(key): value for key, value in os.environ.items()}
80 80
81 81 global_config = _substitute_values(global_config, environ)
82 82 settings = _substitute_values(settings, environ)
83 83
84 84 sanitize_settings_and_apply_defaults(settings)
85 85
86 86 config = Configurator(settings=settings)
87 87
88 88 # Apply compatibility patches
89 89 patches.inspect_getargspec()
90 90
91 91 load_pyramid_environment(global_config, settings)
92 92
93 93 # Static file view comes first
94 94 includeme_first(config)
95 95
96 96 includeme(config)
97 97
98 98 pyramid_app = config.make_wsgi_app()
99 99 pyramid_app = wrap_app_in_wsgi_middlewares(pyramid_app, config)
100 100 pyramid_app.config = config
101 101
102 102 config.configure_celery(global_config['__file__'])
103 103 # creating the app uses a connection - return it after we are done
104 104 meta.Session.remove()
105 105
106 106 log.info('Pyramid app %s created and configured.', pyramid_app)
107 107 return pyramid_app
108 108
109 109
110 110 def not_found_view(request):
111 111 """
112 112 This creates the view which should be registered as not-found-view to
113 113 pyramid.
114 114 """
115 115
116 116 if not getattr(request, 'vcs_call', None):
117 117 # handle like regular case with our error_handler
118 118 return error_handler(HTTPNotFound(), request)
119 119
120 120 # handle not found view as a vcs call
121 121 settings = request.registry.settings
122 122 ae_client = getattr(request, 'ae_client', None)
123 123 vcs_app = VCSMiddleware(
124 124 HTTPNotFound(), request.registry, settings,
125 125 appenlight_client=ae_client)
126 126
127 127 return wsgiapp(vcs_app)(None, request)
128 128
129 129
130 130 def error_handler(exception, request):
131 131 import rhodecode
132 132 from rhodecode.lib import helpers
133 133
134 134 rhodecode_title = rhodecode.CONFIG.get('rhodecode_title') or 'RhodeCode'
135 135
136 136 base_response = HTTPInternalServerError()
137 137 # prefer original exception for the response since it may have headers set
138 138 if isinstance(exception, HTTPException):
139 139 base_response = exception
140 140 elif isinstance(exception, VCSCommunicationError):
141 141 base_response = VCSServerUnavailable()
142 142
143 143 if is_http_error(base_response):
144 144 log.exception(
145 145 'error occurred handling this request for path: %s', request.path)
146 146
147 147 error_explanation = base_response.explanation or str(base_response)
148 148 if base_response.status_code == 404:
149 149 error_explanation += " Or you don't have permission to access it."
150 150 c = AttributeDict()
151 151 c.error_message = base_response.status
152 152 c.error_explanation = error_explanation
153 153 c.visual = AttributeDict()
154 154
155 155 c.visual.rhodecode_support_url = (
156 156 request.registry.settings.get('rhodecode_support_url') or
157 157 request.route_url('rhodecode_support')
158 158 )
159 159 c.redirect_time = 0
160 160 c.rhodecode_name = rhodecode_title
161 161 if not c.rhodecode_name:
162 162 c.rhodecode_name = 'Rhodecode'
163 163
164 164 c.causes = []
165 165 if is_http_error(base_response):
166 166 c.causes.append('Server is overloaded.')
167 167 c.causes.append('Server database connection is lost.')
168 168 c.causes.append('Server expected unhandled error.')
169 169
170 170 if hasattr(base_response, 'causes'):
171 171 c.causes = base_response.causes
172 172
173 173 c.messages = helpers.flash.pop_messages(request=request)
174 174 c.traceback = traceback.format_exc()
175 175 response = render_to_response(
176 176 '/errors/error_document.mako', {'c': c, 'h': helpers}, request=request,
177 177 response=base_response)
178 178
179 179 return response
180 180
181 181
182 182 def includeme_first(config):
183 183 # redirect automatic browser favicon.ico requests to correct place
184 184 def favicon_redirect(context, request):
185 185 return HTTPFound(
186 186 request.static_path('rhodecode:public/images/favicon.ico'))
187 187
188 188 config.add_view(favicon_redirect, route_name='favicon')
189 189 config.add_route('favicon', '/favicon.ico')
190 190
191 191 def robots_redirect(context, request):
192 192 return HTTPFound(
193 193 request.static_path('rhodecode:public/robots.txt'))
194 194
195 195 config.add_view(robots_redirect, route_name='robots')
196 196 config.add_route('robots', '/robots.txt')
197 197
198 198 config.add_static_view(
199 199 '_static/deform', 'deform:static')
200 200 config.add_static_view(
201 201 '_static/rhodecode', path='rhodecode:public', cache_max_age=3600 * 24)
202 202
203 203
204 204 def includeme(config):
205 205 settings = config.registry.settings
206 206 config.set_request_factory(Request)
207 207
208 208 # plugin information
209 209 config.registry.rhodecode_plugins = collections.OrderedDict()
210 210
211 211 config.add_directive(
212 212 'register_rhodecode_plugin', register_rhodecode_plugin)
213 213
214 214 config.add_directive('configure_celery', configure_celery)
215 215
216 216 if asbool(settings.get('appenlight', 'false')):
217 217 config.include('appenlight_client.ext.pyramid_tween')
218 218
219 219 # Includes which are required. The application would fail without them.
220 220 config.include('pyramid_mako')
221 221 config.include('pyramid_beaker')
222 222 config.include('rhodecode.lib.caches')
223 223
224 224 config.include('rhodecode.authentication')
225 225 config.include('rhodecode.integrations')
226 226
227 227 # apps
228 228 config.include('rhodecode.apps._base')
229 229 config.include('rhodecode.apps.ops')
230 230
231 231 config.include('rhodecode.apps.admin')
232 232 config.include('rhodecode.apps.channelstream')
233 233 config.include('rhodecode.apps.login')
234 234 config.include('rhodecode.apps.home')
235 235 config.include('rhodecode.apps.journal')
236 236 config.include('rhodecode.apps.repository')
237 237 config.include('rhodecode.apps.repo_group')
238 238 config.include('rhodecode.apps.user_group')
239 239 config.include('rhodecode.apps.search')
240 240 config.include('rhodecode.apps.user_profile')
241 241 config.include('rhodecode.apps.user_group_profile')
242 242 config.include('rhodecode.apps.my_account')
243 243 config.include('rhodecode.apps.svn_support')
244 244 config.include('rhodecode.apps.ssh_support')
245 245 config.include('rhodecode.apps.gist')
246 246
247 247 config.include('rhodecode.apps.debug_style')
248 248 config.include('rhodecode.tweens')
249 249 config.include('rhodecode.api')
250 250
251 251 config.add_route(
252 252 'rhodecode_support', 'https://rhodecode.com/help/', static=True)
253 253
254 254 config.add_translation_dirs('rhodecode:i18n/')
255 255 settings['default_locale_name'] = settings.get('lang', 'en')
256 256
257 257 # Add subscribers.
258 258 config.add_subscriber(inject_app_settings, ApplicationCreated)
259 259 config.add_subscriber(scan_repositories_if_enabled, ApplicationCreated)
260 260 config.add_subscriber(write_metadata_if_needed, ApplicationCreated)
261 261 config.add_subscriber(write_js_routes_if_enabled, ApplicationCreated)
262 262
263 263 # events
264 264 # TODO(marcink): this should be done when pyramid migration is finished
265 265 # config.add_subscriber(
266 266 # 'rhodecode.integrations.integrations_event_handler',
267 267 # 'rhodecode.events.RhodecodeEvent')
268 268
269 269 # request custom methods
270 270 config.add_request_method(
271 271 'rhodecode.lib.partial_renderer.get_partial_renderer',
272 272 'get_partial_renderer')
273 273
274 274 # Set the authorization policy.
275 275 authz_policy = ACLAuthorizationPolicy()
276 276 config.set_authorization_policy(authz_policy)
277 277
278 278 # Set the default renderer for HTML templates to mako.
279 279 config.add_mako_renderer('.html')
280 280
281 281 config.add_renderer(
282 282 name='json_ext',
283 283 factory='rhodecode.lib.ext_json_renderer.pyramid_ext_json')
284 284
285 285 # include RhodeCode plugins
286 286 includes = aslist(settings.get('rhodecode.includes', []))
287 287 for inc in includes:
288 288 config.include(inc)
289 289
290 290 # custom not found view, if our pyramid app doesn't know how to handle
291 291 # the request pass it to potential VCS handling ap
292 292 config.add_notfound_view(not_found_view)
293 293 if not settings.get('debugtoolbar.enabled', False):
294 294 # disabled debugtoolbar handle all exceptions via the error_handlers
295 295 config.add_view(error_handler, context=Exception)
296 296
297 297 # all errors including 403/404/50X
298 298 config.add_view(error_handler, context=HTTPError)
299 299
300 300
301 301 def wrap_app_in_wsgi_middlewares(pyramid_app, config):
302 302 """
303 303 Apply outer WSGI middlewares around the application.
304 304 """
305 305 settings = config.registry.settings
306 306
307 307 # enable https redirects based on HTTP_X_URL_SCHEME set by proxy
308 308 pyramid_app = HttpsFixup(pyramid_app, settings)
309 309
310 310 pyramid_app, _ae_client = wrap_in_appenlight_if_enabled(
311 311 pyramid_app, settings)
312 312 config.registry.ae_client = _ae_client
313 313
314 314 if settings['gzip_responses']:
315 315 pyramid_app = make_gzip_middleware(
316 316 pyramid_app, settings, compress_level=1)
317 317
318 318 # this should be the outer most middleware in the wsgi stack since
319 319 # middleware like Routes make database calls
320 320 def pyramid_app_with_cleanup(environ, start_response):
321 321 try:
322 322 return pyramid_app(environ, start_response)
323 323 finally:
324 324 # Dispose current database session and rollback uncommitted
325 325 # transactions.
326 326 meta.Session.remove()
327 327
328 328 # In a single threaded mode server, on non sqlite db we should have
329 329 # '0 Current Checked out connections' at the end of a request,
330 330 # if not, then something, somewhere is leaving a connection open
331 331 pool = meta.Base.metadata.bind.engine.pool
332 332 log.debug('sa pool status: %s', pool.status())
333 333
334 334 return pyramid_app_with_cleanup
335 335
336 336
337 337 def sanitize_settings_and_apply_defaults(settings):
338 338 """
339 339 Applies settings defaults and does all type conversion.
340 340
341 341 We would move all settings parsing and preparation into this place, so that
342 342 we have only one place left which deals with this part. The remaining parts
343 343 of the application would start to rely fully on well prepared settings.
344 344
345 345 This piece would later be split up per topic to avoid a big fat monster
346 346 function.
347 347 """
348 348
349 349 settings.setdefault('rhodecode.edition', 'Community Edition')
350 350
351 351 if 'mako.default_filters' not in settings:
352 352 # set custom default filters if we don't have it defined
353 353 settings['mako.imports'] = 'from rhodecode.lib.base import h_filter'
354 354 settings['mako.default_filters'] = 'h_filter'
355 355
356 356 if 'mako.directories' not in settings:
357 357 mako_directories = settings.setdefault('mako.directories', [
358 358 # Base templates of the original application
359 359 'rhodecode:templates',
360 360 ])
361 361 log.debug(
362 362 "Using the following Mako template directories: %s",
363 363 mako_directories)
364 364
365 365 # Default includes, possible to change as a user
366 366 pyramid_includes = settings.setdefault('pyramid.includes', [
367 367 'rhodecode.lib.middleware.request_wrapper',
368 368 ])
369 369 log.debug(
370 370 "Using the following pyramid.includes: %s",
371 371 pyramid_includes)
372 372
373 373 # TODO: johbo: Re-think this, usually the call to config.include
374 374 # should allow to pass in a prefix.
375 375 settings.setdefault('rhodecode.api.url', '/_admin/api')
376 376
377 377 # Sanitize generic settings.
378 378 _list_setting(settings, 'default_encoding', 'UTF-8')
379 379 _bool_setting(settings, 'is_test', 'false')
380 380 _bool_setting(settings, 'gzip_responses', 'false')
381 381
382 382 # Call split out functions that sanitize settings for each topic.
383 383 _sanitize_appenlight_settings(settings)
384 384 _sanitize_vcs_settings(settings)
385 385
386 386 # configure instance id
387 387 config_utils.set_instance_id(settings)
388 388
389 389 return settings
390 390
391 391
392 392 def _sanitize_appenlight_settings(settings):
393 393 _bool_setting(settings, 'appenlight', 'false')
394 394
395 395
396 396 def _sanitize_vcs_settings(settings):
397 397 """
398 398 Applies settings defaults and does type conversion for all VCS related
399 399 settings.
400 400 """
401 401 _string_setting(settings, 'vcs.svn.compatible_version', '')
402 402 _string_setting(settings, 'git_rev_filter', '--all')
403 403 _string_setting(settings, 'vcs.hooks.protocol', 'http')
404 _string_setting(settings, 'vcs.hooks.host', '127.0.0.1')
404 405 _string_setting(settings, 'vcs.scm_app_implementation', 'http')
405 406 _string_setting(settings, 'vcs.server', '')
406 407 _string_setting(settings, 'vcs.server.log_level', 'debug')
407 408 _string_setting(settings, 'vcs.server.protocol', 'http')
408 409 _bool_setting(settings, 'startup.import_repos', 'false')
409 410 _bool_setting(settings, 'vcs.hooks.direct_calls', 'false')
410 411 _bool_setting(settings, 'vcs.server.enable', 'true')
411 412 _bool_setting(settings, 'vcs.start_server', 'false')
412 413 _list_setting(settings, 'vcs.backends', 'hg, git, svn')
413 414 _int_setting(settings, 'vcs.connection_timeout', 3600)
414 415
415 416 # Support legacy values of vcs.scm_app_implementation. Legacy
416 417 # configurations may use 'rhodecode.lib.middleware.utils.scm_app_http'
417 418 # which is now mapped to 'http'.
418 419 scm_app_impl = settings['vcs.scm_app_implementation']
419 420 if scm_app_impl == 'rhodecode.lib.middleware.utils.scm_app_http':
420 421 settings['vcs.scm_app_implementation'] = 'http'
421 422
422 423
423 424 def _int_setting(settings, name, default):
424 425 settings[name] = int(settings.get(name, default))
425 426
426 427
427 428 def _bool_setting(settings, name, default):
428 429 input_val = settings.get(name, default)
429 430 if isinstance(input_val, unicode):
430 431 input_val = input_val.encode('utf8')
431 432 settings[name] = asbool(input_val)
432 433
433 434
434 435 def _list_setting(settings, name, default):
435 436 raw_value = settings.get(name, default)
436 437
437 438 old_separator = ','
438 439 if old_separator in raw_value:
439 440 # If we get a comma separated list, pass it to our own function.
440 441 settings[name] = rhodecode_aslist(raw_value, sep=old_separator)
441 442 else:
442 443 # Otherwise we assume it uses pyramids space/newline separation.
443 444 settings[name] = aslist(raw_value)
444 445
445 446
446 447 def _string_setting(settings, name, default, lower=True):
447 448 value = settings.get(name, default)
448 449 if lower:
449 450 value = value.lower()
450 451 settings[name] = value
451 452
452 453
453 454 def _substitute_values(mapping, substitutions):
454 455 result = {
455 456 # Note: Cannot use regular replacements, since they would clash
456 457 # with the implementation of ConfigParser. Using "format" instead.
457 458 key: value.format(**substitutions)
458 459 for key, value in mapping.items()
459 460 }
460 461 return result
@@ -1,87 +1,89 b''
1 1 # -*- coding: utf-8 -*-
2 2
3 3 # Copyright (C) 2010-2018 RhodeCode GmbH
4 4 #
5 5 # This program is free software: you can redistribute it and/or modify
6 6 # it under the terms of the GNU Affero General Public License, version 3
7 7 # (only), as published by the Free Software Foundation.
8 8 #
9 9 # This program is distributed in the hope that it will be useful,
10 10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 12 # GNU General Public License for more details.
13 13 #
14 14 # You should have received a copy of the GNU Affero General Public License
15 15 # along with this program. If not, see <http://www.gnu.org/licenses/>.
16 16 #
17 17 # This program is dual-licensed. If you wish to learn more about the
18 18 # RhodeCode Enterprise Edition, including its added features, Support services,
19 19 # and proprietary license terms, please see https://rhodecode.com/licenses/
20 20
21 21 import os
22 22 import shlex
23 23 import platform
24 24
25 25 from rhodecode.model import init_model
26 26
27 27
28
29 28 def configure_vcs(config):
30 29 """
31 30 Patch VCS config with some RhodeCode specific stuff
32 31 """
33 32 from rhodecode.lib.vcs import conf
33 import rhodecode.lib.vcs.conf.settings
34
34 35 conf.settings.BACKENDS = {
35 36 'hg': 'rhodecode.lib.vcs.backends.hg.MercurialRepository',
36 37 'git': 'rhodecode.lib.vcs.backends.git.GitRepository',
37 38 'svn': 'rhodecode.lib.vcs.backends.svn.SubversionRepository',
38 39 }
39 40
40 41 conf.settings.HOOKS_PROTOCOL = config['vcs.hooks.protocol']
42 conf.settings.HOOKS_HOST = config['vcs.hooks.host']
41 43 conf.settings.HOOKS_DIRECT_CALLS = config['vcs.hooks.direct_calls']
42 44 conf.settings.GIT_REV_FILTER = shlex.split(config['git_rev_filter'])
43 45 conf.settings.DEFAULT_ENCODINGS = config['default_encoding']
44 46 conf.settings.ALIASES[:] = config['vcs.backends']
45 47 conf.settings.SVN_COMPATIBLE_VERSION = config['vcs.svn.compatible_version']
46 48
47 49
48 50 def initialize_database(config):
49 51 from rhodecode.lib.utils2 import engine_from_config, get_encryption_key
50 52 engine = engine_from_config(config, 'sqlalchemy.db1.')
51 53 init_model(engine, encryption_key=get_encryption_key(config))
52 54
53 55
54 56 def initialize_test_environment(settings, test_env=None):
55 57 if test_env is None:
56 58 test_env = not int(os.environ.get('RC_NO_TMP_PATH', 0))
57 59
58 60 from rhodecode.lib.utils import (
59 61 create_test_directory, create_test_database, create_test_repositories,
60 62 create_test_index)
61 63 from rhodecode.tests import TESTS_TMP_PATH
62 64 from rhodecode.lib.vcs.backends.hg import largefiles_store
63 65 from rhodecode.lib.vcs.backends.git import lfs_store
64 66
65 67 # test repos
66 68 if test_env:
67 69 create_test_directory(TESTS_TMP_PATH)
68 70 # large object stores
69 71 create_test_directory(largefiles_store(TESTS_TMP_PATH))
70 72 create_test_directory(lfs_store(TESTS_TMP_PATH))
71 73
72 74 create_test_database(TESTS_TMP_PATH, settings)
73 75 create_test_repositories(TESTS_TMP_PATH, settings)
74 76 create_test_index(TESTS_TMP_PATH, settings)
75 77
76 78
77 79 def get_vcs_server_protocol(config):
78 80 return config['vcs.server.protocol']
79 81
80 82
81 83 def set_instance_id(config):
82 84 """ Sets a dynamic generated config['instance_id'] if missing or '*' """
83 85
84 86 config['instance_id'] = config.get('instance_id') or ''
85 87 if config['instance_id'] == '*' or not config['instance_id']:
86 88 _platform_id = platform.uname()[1] or 'instance'
87 89 config['instance_id'] = '%s-%s' % (_platform_id, os.getpid())
@@ -1,313 +1,313 b''
1 1 # -*- coding: utf-8 -*-
2 2
3 3 # Copyright (C) 2010-2018 RhodeCode GmbH
4 4 #
5 5 # This program is free software: you can redistribute it and/or modify
6 6 # it under the terms of the GNU Affero General Public License, version 3
7 7 # (only), as published by the Free Software Foundation.
8 8 #
9 9 # This program is distributed in the hope that it will be useful,
10 10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 12 # GNU General Public License for more details.
13 13 #
14 14 # You should have received a copy of the GNU Affero General Public License
15 15 # along with this program. If not, see <http://www.gnu.org/licenses/>.
16 16 #
17 17 # This program is dual-licensed. If you wish to learn more about the
18 18 # RhodeCode Enterprise Edition, including its added features, Support services,
19 19 # and proprietary license terms, please see https://rhodecode.com/licenses/
20 20
21 21 import os
22 22 import time
23 23 import logging
24 24 import tempfile
25 25 import traceback
26 26 import threading
27 27
28 28 from BaseHTTPServer import BaseHTTPRequestHandler
29 29 from SocketServer import TCPServer
30 30
31 31 import rhodecode
32 32 from rhodecode.model import meta
33 33 from rhodecode.lib.base import bootstrap_request, bootstrap_config
34 34 from rhodecode.lib import hooks_base
35 35 from rhodecode.lib.utils2 import AttributeDict
36 36 from rhodecode.lib.ext_json import json
37 37
38 38
39 39 log = logging.getLogger(__name__)
40 40
41 41
class HooksHttpHandler(BaseHTTPRequestHandler):
    # HTTP handler that receives hook invocations as JSON POST bodies
    # and dispatches them by name onto a `Hooks` instance.

    def do_POST(self):
        # payload shape: {'method': <hook name>, 'extras': <dict>}
        method, extras = self._read_request()
        # `txn_id` is injected onto the server by the callback daemon
        # (see HttpHooksCallbackDaemon._prepare); when present, verify
        # the caller-supplied transaction matches before running hooks
        txn_id = getattr(self.server, 'txn_id', None)
        if txn_id:
            from rhodecode.lib.caches import compute_key_from_params
            log.debug('Computing TXN_ID based on `%s`:`%s`',
                      extras['repository'], extras['txn_id'])
            computed_txn_id = compute_key_from_params(
                extras['repository'], extras['txn_id'])
            if txn_id != computed_txn_id:
                raise Exception(
                    'TXN ID fail: expected {} got {} instead'.format(
                        txn_id, computed_txn_id))

        try:
            result = self._call_hook(method, extras)
        except Exception as e:
            # serialize the failure so the remote side can reconstruct it
            exc_tb = traceback.format_exc()
            result = {
                'exception': e.__class__.__name__,
                'exception_traceback': exc_tb,
                'exception_args': e.args
            }
        self._write_response(result)

    def _read_request(self):
        # Read exactly Content-Length bytes and decode the JSON body.
        length = int(self.headers['Content-Length'])
        body = self.rfile.read(length).decode('utf-8')
        data = json.loads(body)
        return data['method'], data['extras']

    def _write_response(self, result):
        # Always respond 200; errors travel inside the JSON payload.
        self.send_response(200)
        self.send_header("Content-type", "text/json")
        self.end_headers()
        self.wfile.write(json.dumps(result))

    def _call_hook(self, method, extras):
        # Dispatch by attribute name onto a fresh Hooks object; always
        # drop the DB session so handler threads do not leak connections.
        hooks = Hooks()
        try:
            result = getattr(hooks, method)(extras)
        finally:
            meta.Session.remove()
        return result

    def log_message(self, format, *args):
        """
        This is an overridden method of BaseHTTPRequestHandler which logs using
        logging library instead of writing directly to stderr.
        """

        message = format % args

        log.debug(
            "%s - - [%s] %s", self.client_address[0],
            self.log_date_time_string(), message)
100 100
101 101
class DummyHooksCallbackDaemon(object):
    # No-op stand-in used when hooks are executed via direct in-process
    # calls instead of an HTTP callback; exposes only the hooks module path.
    hooks_uri = ''

    def __init__(self):
        # dotted module path of `Hooks`, published via extras['hooks_module']
        self.hooks_module = Hooks.__module__

    def __enter__(self):
        log.debug('Running dummy hooks callback daemon')
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        log.debug('Exiting dummy hooks callback daemon')
114 114
115 115
class ThreadedHookCallbackDaemon(object):
    """
    Base class for hook callback daemons served from a background thread.

    Subclasses implement ``_prepare`` (build the server), ``_run`` (start
    serving) and ``_stop`` (shut down); this class provides the context
    manager protocol around them.
    """

    _callback_thread = None
    _daemon = None
    _done = False

    def __init__(self, txn_id=None, host=None, port=None):
        # BUG FIX: forward the `host` argument instead of the hard-coded
        # `host=None`, which silently discarded the configurable callback
        # host and always fell back to the subclass default.
        self._prepare(txn_id=txn_id, host=host, port=port)

    def __enter__(self):
        self._run()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        log.debug('Callback daemon exiting now...')
        self._stop()

    def _prepare(self, txn_id=None, host=None, port=None):
        raise NotImplementedError()

    def _run(self):
        raise NotImplementedError()

    def _stop(self):
        raise NotImplementedError()
141 141
142 142
class HttpHooksCallbackDaemon(ThreadedHookCallbackDaemon):
    """
    Context manager which will run a callback daemon in a background thread.
    """

    hooks_uri = None

    # From Python docs: Polling reduces our responsiveness to a shutdown
    # request and wastes cpu at all other times.
    POLL_INTERVAL = 0.01

    def _prepare(self, txn_id=None, host=None, port=None):
        # callback host is configurable; default to loopback when unset
        host = host or '127.0.0.1'
        self._done = False
        # port 0 makes the OS pick a free port; the real port is read back
        # from server_address just below
        self._daemon = TCPServer((host, port or 0), HooksHttpHandler)
        _, port = self._daemon.server_address
        self.hooks_uri = '{}:{}'.format(host, port)
        self.txn_id = txn_id
        # inject transaction_id for later verification
        self._daemon.txn_id = self.txn_id

        log.debug(
            "Preparing HTTP callback daemon at `%s` and registering hook object",
            self.hooks_uri)

    def _run(self):
        log.debug("Running event loop of callback daemon in background thread")
        callback_thread = threading.Thread(
            target=self._daemon.serve_forever,
            kwargs={'poll_interval': self.POLL_INTERVAL})
        # daemon thread so it never blocks interpreter shutdown
        callback_thread.daemon = True
        callback_thread.start()
        self._callback_thread = callback_thread

    def _stop(self):
        log.debug("Waiting for background thread to finish.")
        self._daemon.shutdown()
        self._callback_thread.join()
        self._daemon = None
        self._callback_thread = None
        if self.txn_id:
            # drop the persisted txn metadata file created for this txn
            txn_id_file = get_txn_id_data_path(self.txn_id)
            log.debug('Cleaning up TXN ID %s', txn_id_file)
            if os.path.isfile(txn_id_file):
                os.remove(txn_id_file)

        log.debug("Background thread done.")
191 190
192 191
def get_txn_id_data_path(txn_id):
    """Return the tempdir file path used to persist data for ``txn_id``."""
    file_name = 'rc_txn_id_{}'.format(txn_id)
    return os.path.join(tempfile.gettempdir(), file_name)
196 195
197 196
def store_txn_id_data(txn_id, data_dict):
    """Persist ``data_dict`` as JSON in the tempdir file for ``txn_id``."""
    if not txn_id:
        log.warning('Cannot store txn_id because it is empty')
        return

    target_path = get_txn_id_data_path(txn_id)
    try:
        with open(target_path, 'wb') as file_obj:
            file_obj.write(json.dumps(data_dict))
    except Exception:
        log.exception('Failed to write txn_id metadata')
209 208
210 209
def get_txn_id_from_store(txn_id):
    """
    Reads txn_id from store and if present returns the data for callback manager
    """
    stored_file = get_txn_id_data_path(txn_id)
    try:
        with open(stored_file, 'rb') as file_obj:
            raw_data = file_obj.read()
        return json.loads(raw_data)
    except Exception:
        # missing/corrupt store entry means "no data"
        return {}
221 220
222 221
def prepare_callback_daemon(extras, protocol, host, use_direct_calls, txn_id=None):
    """
    Create the hook callback daemon for ``protocol`` and enrich ``extras``
    with the connection details hooks need to call back into RhodeCode.

    :param extras: dict of vcs operation context data, mutated in place
    :param protocol: callback protocol; only 'http' is supported here
    :param host: host/IP the HTTP callback daemon should bind to
    :param use_direct_calls: when True, skip the network daemon and expose
        the hooks module for direct in-process calls
    :param txn_id: optional SVN transaction id; when it has stored data the
        daemon reuses the port recorded for that transaction
    :return: tuple of (callback_daemon, extras)
    """
    txn_details = get_txn_id_from_store(txn_id)
    port = txn_details.get('port', 0)
    if use_direct_calls:
        callback_daemon = DummyHooksCallbackDaemon()
        extras['hooks_module'] = callback_daemon.hooks_module
    else:
        if protocol == 'http':
            callback_daemon = HttpHooksCallbackDaemon(
                txn_id=txn_id, host=host, port=port)
        else:
            log.error('Unsupported callback daemon protocol "%s"', protocol)
            raise Exception('Unsupported callback daemon protocol.')

    extras['hooks_uri'] = callback_daemon.hooks_uri
    extras['hooks_protocol'] = protocol
    extras['time'] = time.time()

    # register txn_id
    extras['txn_id'] = txn_id

    log.debug('Prepared a callback daemon: %s at url `%s`',
              callback_daemon.__class__.__name__, callback_daemon.hooks_uri)
    return callback_daemon, extras
246 246
247 247
class Hooks(object):
    """
    Exposes the hooks for remote call backs
    """

    def repo_size(self, extras):
        # repo-size hook, executed via hooks_base with the request context
        log.debug("Called repo_size of %s object", self)
        return self._call_hook(hooks_base.repo_size, extras)

    def pre_pull(self, extras):
        log.debug("Called pre_pull of %s object", self)
        return self._call_hook(hooks_base.pre_pull, extras)

    def post_pull(self, extras):
        log.debug("Called post_pull of %s object", self)
        return self._call_hook(hooks_base.post_pull, extras)

    def pre_push(self, extras):
        log.debug("Called pre_push of %s object", self)
        return self._call_hook(hooks_base.pre_push, extras)

    def post_push(self, extras):
        log.debug("Called post_push of %s object", self)
        return self._call_hook(hooks_base.post_push, extras)

    def _call_hook(self, hook, extras):
        """
        Run ``hook`` with a bootstrapped request context built from
        ``extras`` and return a {'status', 'output'} dict; failures are
        reported as data (status 128) rather than raised.
        """
        extras = AttributeDict(extras)
        server_url = extras['server_url']
        request = bootstrap_request(application_url=server_url)

        bootstrap_config(request)  # inject routes and other interfaces

        # inject the user for usage in hooks
        request.user = AttributeDict({'username': extras.username,
                                      'ip_addr': extras.ip,
                                      'user_id': extras.user_id})

        extras.request = request

        try:
            result = hook(extras)
        except Exception as error:
            exc_tb = traceback.format_exc()
            log.exception('Exception when handling hook %s', hook)
            error_args = error.args
            # mirror the error shape produced by HooksHttpHandler
            return {
                'status': 128,
                'output': '',
                'exception': type(error).__name__,
                'exception_traceback': exc_tb,
                'exception_args': error_args,
            }
        finally:
            # always drop the DB session, success or failure
            meta.Session.remove()

        log.debug('Got hook call response %s', result)
        return {
            'status': result.status,
            'output': result.output,
        }

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        pass
@@ -1,674 +1,674 b''
1 1 # -*- coding: utf-8 -*-
2 2
3 3 # Copyright (C) 2014-2018 RhodeCode GmbH
4 4 #
5 5 # This program is free software: you can redistribute it and/or modify
6 6 # it under the terms of the GNU Affero General Public License, version 3
7 7 # (only), as published by the Free Software Foundation.
8 8 #
9 9 # This program is distributed in the hope that it will be useful,
10 10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 12 # GNU General Public License for more details.
13 13 #
14 14 # You should have received a copy of the GNU Affero General Public License
15 15 # along with this program. If not, see <http://www.gnu.org/licenses/>.
16 16 #
17 17 # This program is dual-licensed. If you wish to learn more about the
18 18 # RhodeCode Enterprise Edition, including its added features, Support services,
19 19 # and proprietary license terms, please see https://rhodecode.com/licenses/
20 20
21 21 """
22 22 SimpleVCS middleware for handling protocol request (push/clone etc.)
23 23 It's implemented with basic auth function
24 24 """
25 25
26 26 import os
27 27 import re
28 28 import logging
29 29 import importlib
30 30 from functools import wraps
31 31 from StringIO import StringIO
32 32 from lxml import etree
33 33
34 34 import time
35 35 from paste.httpheaders import REMOTE_USER, AUTH_TYPE
36 36
37 37 from pyramid.httpexceptions import (
38 38 HTTPNotFound, HTTPForbidden, HTTPNotAcceptable, HTTPInternalServerError)
39 39 from zope.cachedescriptors.property import Lazy as LazyProperty
40 40
41 41 import rhodecode
42 42 from rhodecode.authentication.base import (
43 43 authenticate, get_perms_cache_manager, VCS_TYPE, loadplugin)
44 44 from rhodecode.lib import caches
45 45 from rhodecode.lib.auth import AuthUser, HasPermissionAnyMiddleware
46 46 from rhodecode.lib.base import (
47 47 BasicAuth, get_ip_addr, get_user_agent, vcs_operation_context)
48 48 from rhodecode.lib.exceptions import (UserCreationError, NotAllowedToCreateUserError)
49 49 from rhodecode.lib.hooks_daemon import prepare_callback_daemon
50 50 from rhodecode.lib.middleware import appenlight
51 51 from rhodecode.lib.middleware.utils import scm_app_http
52 52 from rhodecode.lib.utils import is_valid_repo, SLUG_RE
53 53 from rhodecode.lib.utils2 import safe_str, fix_PATH, str2bool, safe_unicode
54 54 from rhodecode.lib.vcs.conf import settings as vcs_settings
55 55 from rhodecode.lib.vcs.backends import base
56 56
57 57 from rhodecode.model import meta
58 58 from rhodecode.model.db import User, Repository, PullRequest
59 59 from rhodecode.model.scm import ScmModel
60 60 from rhodecode.model.pull_request import PullRequestModel
61 61 from rhodecode.model.settings import SettingsModel, VcsSettingsModel
62 62
63 63 log = logging.getLogger(__name__)
64 64
65 65
def extract_svn_txn_id(acl_repo_name, data):
    """
    Pull the svn transaction id out of the XML body submitted with a DAV
    POST request and derive the repo-scoped txn key from it.

    Returns None when the body has no txn reference or cannot be parsed.
    """
    try:
        xml_root = etree.fromstring(data)
        txn_pattern = re.compile(r'/txn/(?P<txn_id>.*)')
        for element in xml_root:
            if element.tag != '{DAV:}source':
                continue
            for child in element:
                if child.tag != '{DAV:}href':
                    continue
                found = txn_pattern.search(child.text)
                if not found:
                    continue
                svn_txn_id = found.groupdict()['txn_id']
                return caches.compute_key_from_params(
                    acl_repo_name, svn_txn_id)
    except Exception:
        log.exception('Failed to extract txn_id')
86 86
87 87
def initialize_generator(factory):
    """
    Initializes the returned generator by draining its first element.

    This can be used to give a generator an initializer, which is the code
    up to the first yield statement. This decorator enforces that the first
    produced element has the value ``"__init__"`` to make its special
    purpose very explicit in the using code.
    """

    @wraps(factory)
    def wrapper(*args, **kwargs):
        gen = factory(*args, **kwargs)
        try:
            # use the builtin `next()` (available since Python 2.6) instead
            # of the Python 2-only `gen.next()` method; behavior-identical
            # on Python 2 and forward-compatible with Python 3
            init = next(gen)
        except StopIteration:
            raise ValueError('Generator must yield at least one element.')
        if init != "__init__":
            raise ValueError('First yielded element must be "__init__".')
        return gen
    return wrapper
109 109
110 110
111 111 class SimpleVCS(object):
112 112 """Common functionality for SCM HTTP handlers."""
113 113
114 114 SCM = 'unknown'
115 115
116 116 acl_repo_name = None
117 117 url_repo_name = None
118 118 vcs_repo_name = None
119 119 rc_extras = {}
120 120
121 121 # We have to handle requests to shadow repositories different than requests
122 122 # to normal repositories. Therefore we have to distinguish them. To do this
123 123 # we use this regex which will match only on URLs pointing to shadow
124 124 # repositories.
125 125 shadow_repo_re = re.compile(
126 126 '(?P<groups>(?:{slug_pat}/)*)' # repo groups
127 127 '(?P<target>{slug_pat})/' # target repo
128 128 'pull-request/(?P<pr_id>\d+)/' # pull request
129 129 'repository$' # shadow repo
130 130 .format(slug_pat=SLUG_RE.pattern))
131 131
    def __init__(self, config, registry):
        """
        :param config: dict-like application settings for this middleware
        :param registry: pyramid registry; receives the rhodecode settings
            and is passed to the authentication machinery
        """
        self.registry = registry
        self.config = config
        # re-populated by specialized middleware
        self.repo_vcs_config = base.Config()
        self.rhodecode_settings = SettingsModel().get_all_settings(cache=True)

        registry.rhodecode_settings = self.rhodecode_settings
        # authenticate this VCS request using authfunc
        auth_ret_code_detection = \
            str2bool(self.config.get('auth_ret_code_detection', False))
        self.authenticate = BasicAuth(
            '', authenticate, registry, config.get('auth_ret_code'),
            auth_ret_code_detection)
        # placeholder; the real client address is resolved per request
        self.ip_addr = '0.0.0.0'
147 147
    @LazyProperty
    def global_vcs_config(self):
        # site-wide vcs ui settings; fall back to an empty config when the
        # settings model cannot be read (e.g. during early bootstrap)
        try:
            return VcsSettingsModel().get_ui_settings_as_config_obj()
        except Exception:
            return base.Config()
154 154
    @property
    def base_path(self):
        """
        Resolve the filesystem base path for repositories: per-repo vcs
        config first, then the global vcs config, then the plain
        ``base_path`` config option; raises ValueError when none is set.
        """
        settings_path = self.repo_vcs_config.get(
            *VcsSettingsModel.PATH_SETTING)

        if not settings_path:
            settings_path = self.global_vcs_config.get(
                *VcsSettingsModel.PATH_SETTING)

        if not settings_path:
            # try, maybe we passed in explicitly as config option
            settings_path = self.config.get('base_path')

        if not settings_path:
            raise ValueError('FATAL: base_path is empty')
        return settings_path
171 171
    def set_repo_names(self, environ):
        """
        This will populate the attributes acl_repo_name, url_repo_name,
        vcs_repo_name and is_shadow_repo. In case of requests to normal (non
        shadow) repositories all names are equal. In case of requests to a
        shadow repository the acl-name points to the target repo of the pull
        request and the vcs-name points to the shadow repo file system path.
        The url-name is always the URL used by the vcs client program.

        Example in case of a shadow repo:
            acl_repo_name = RepoGroup/MyRepo
            url_repo_name = RepoGroup/MyRepo/pull-request/3/repository
            vcs_repo_name = /repo/base/path/RepoGroup/.__shadow_MyRepo_pr-3'
        """
        # First we set the repo name from URL for all attributes. This is the
        # default if handling normal (non shadow) repo requests.
        # NOTE(review): _get_repository_name is presumably provided by the
        # scm-specific subclass -- not visible in this file chunk, confirm.
        self.url_repo_name = self._get_repository_name(environ)
        self.acl_repo_name = self.vcs_repo_name = self.url_repo_name
        self.is_shadow_repo = False

        # Check if this is a request to a shadow repository.
        match = self.shadow_repo_re.match(self.url_repo_name)
        if match:
            match_dict = match.groupdict()

            # Build acl repo name from regex match.
            acl_repo_name = safe_unicode('{groups}{target}'.format(
                groups=match_dict['groups'] or '',
                target=match_dict['target']))

            # Retrieve pull request instance by ID from regex match.
            pull_request = PullRequest.get(match_dict['pr_id'])

            # Only proceed if we got a pull request and if acl repo name from
            # URL equals the target repo name of the pull request.
            if pull_request and \
                    (acl_repo_name == pull_request.target_repo.repo_name):
                repo_id = pull_request.target_repo.repo_id
                # Get file system path to shadow repository.
                workspace_id = PullRequestModel()._workspace_id(pull_request)
                target_vcs = pull_request.target_repo.scm_instance()
                vcs_repo_name = target_vcs._get_shadow_repository_path(
                    repo_id, workspace_id)

                # Store names for later usage.
                self.vcs_repo_name = vcs_repo_name
                self.acl_repo_name = acl_repo_name
                self.is_shadow_repo = True

        log.debug('Setting all VCS repository names: %s', {
            'acl_repo_name': self.acl_repo_name,
            'url_repo_name': self.url_repo_name,
            'vcs_repo_name': self.vcs_repo_name,
        })
226 226
    @property
    def scm_app(self):
        # choose between the builtin HTTP scm app and a custom module given
        # as a dotted path in the `vcs.scm_app_implementation` setting
        custom_implementation = self.config['vcs.scm_app_implementation']
        if custom_implementation == 'http':
            log.info('Using HTTP implementation of scm app.')
            scm_app_impl = scm_app_http
        else:
            log.info('Using custom implementation of scm_app: "{}"'.format(
                custom_implementation))
            scm_app_impl = importlib.import_module(custom_implementation)
        return scm_app_impl
238 238
    def _get_by_id(self, repo_name):
        """
        Gets a special pattern _<ID> from clone url and tries to replace it
        with a repository_name for support of _<ID> non changeable urls
        """

        data = repo_name.split('/')
        if len(data) >= 2:
            from rhodecode.model.repo import RepoModel
            by_id_match = RepoModel().get_repo_by_id(repo_name)
            if by_id_match:
                # swap the `_<ID>` segment for the real repository name
                data[1] = by_id_match.repo_name

        return safe_str('/'.join(data))
253 253
    def _invalidate_cache(self, repo_name):
        """
        Set's cache for this repository for invalidation on next access

        :param repo_name: full repo name, also a cache key
        """
        ScmModel().mark_for_invalidation(repo_name)
261 261
    def is_valid_and_existing_repo(self, repo_name, base_path, scm_type):
        """
        True when the repository exists in the database, matches the
        expected scm type, and is a valid repository on disk.
        """
        db_repo = Repository.get_by_repo_name(repo_name)
        if not db_repo:
            log.debug('Repository `%s` not found inside the database.',
                      repo_name)
            return False

        if db_repo.repo_type != scm_type:
            log.warning(
                'Repository `%s` have incorrect scm_type, expected %s got %s',
                repo_name, db_repo.repo_type, scm_type)
            return False

        # enable largefiles extension for the on-disk validity check
        config = db_repo._config
        config.set('extensions', 'largefiles', '')
        return is_valid_repo(
            repo_name, base_path,
            explicit_scm=scm_type, expect_scm=scm_type, config=config)
280 280
281 281 def valid_and_active_user(self, user):
282 282 """
283 283 Checks if that user is not empty, and if it's actually object it checks
284 284 if he's active.
285 285
286 286 :param user: user object or None
287 287 :return: boolean
288 288 """
289 289 if user is None:
290 290 return False
291 291
292 292 elif user.active:
293 293 return True
294 294
295 295 return False
296 296
    @property
    def is_shadow_repo_dir(self):
        # True when the shadow repository directory actually exists on disk
        return os.path.isdir(self.vcs_repo_name)
300 300
    def _check_permission(self, action, user, repo_name, ip_addr=None,
                          plugin_id='', plugin_cache_active=False, cache_ttl=0):
        """
        Checks permissions using action (push/pull) user and repository
        name. If plugin_cache and ttl is set it will use the plugin which
        authenticated the user to store the cached permissions result for N
        amount of seconds as in cache_ttl

        :param action: push or pull action
        :param user: user instance
        :param repo_name: repository name
        :param ip_addr: client ip; checked against the user's IP whitelist
        :param plugin_id: id of the auth plugin, part of the cache key
        :param plugin_cache_active: when True cache the permission result
        :param cache_ttl: cache lifetime in seconds for the cached result
        """

        # get instance of cache manager configured for a namespace
        cache_manager = get_perms_cache_manager(
            custom_ttl=cache_ttl, suffix=user.user_id)
        log.debug('AUTH_CACHE_TTL for permissions `%s` active: %s (TTL: %s)',
                  plugin_id, plugin_cache_active, cache_ttl)

        # for environ based password can be empty, but then the validation is
        # on the server that fills in the env data needed for authentication
        _perm_calc_hash = caches.compute_key_from_params(
            plugin_id, action, user.user_id, repo_name, ip_addr)

        # _authenticate is a wrapper for .auth() method of plugin.
        # it checks if .auth() sends proper data.
        # For RhodeCodeExternalAuthPlugin it also maps users to
        # Database and maps the attributes returned from .auth()
        # to RhodeCode database. If this function returns data
        # then auth is correct.
        start = time.time()
        log.debug('Running plugin `%s` permissions check', plugin_id)

        def perm_func():
            """
            This function is used internally in Cache of Beaker to calculate
            Results
            """
            log.debug('auth: calculating permission access now...')
            # check IP
            inherit = user.inherit_default_permissions
            ip_allowed = AuthUser.check_ip_allowed(
                user.user_id, ip_addr, inherit_from_default=inherit)
            if ip_allowed:
                log.info('Access for IP:%s allowed', ip_addr)
            else:
                # blocked IP short-circuits any repository permission
                return False

            if action == 'push':
                perms = ('repository.write', 'repository.admin')
                if not HasPermissionAnyMiddleware(*perms)(user, repo_name):
                    return False

            else:
                # any other action need at least read permission
                perms = (
                    'repository.read', 'repository.write', 'repository.admin')
                if not HasPermissionAnyMiddleware(*perms)(user, repo_name):
                    return False

            return True

        if plugin_cache_active:
            log.debug('Trying to fetch cached perms by %s', _perm_calc_hash[:6])
            perm_result = cache_manager.get(
                _perm_calc_hash, createfunc=perm_func)
        else:
            perm_result = perm_func()

        auth_time = time.time() - start
        log.debug('Permissions for plugin `%s` completed in %.3fs, '
                  'expiration time of fetched cache %.1fs.',
                  plugin_id, auth_time, cache_ttl)

        return perm_result
376 376
    def _check_ssl(self, environ, start_response):
        """
        Checks the SSL check flag and returns False if SSL is not present
        and required True otherwise
        """
        # NOTE(review): `wsgi._org_proto` appears to be injected by an
        # earlier middleware with the original request protocol -- confirm.
        org_proto = environ['wsgi._org_proto']
        # check if we have SSL required ! if not it's a bad request !
        require_ssl = str2bool(self.repo_vcs_config.get('web', 'push_ssl'))
        if require_ssl and org_proto == 'http':
            log.debug(
                'Bad request: detected protocol is `%s` and '
                'SSL/HTTPS is required.', org_proto)
            return False
        return True
391 391
    def _get_default_cache_ttl(self):
        """
        Return (cache_active, cache_ttl) for anonymous-access permission
        checks, sourced from the builtin `rhodecode` auth plugin settings.
        """
        # take AUTH_CACHE_TTL from the `rhodecode` auth plugin
        plugin = loadplugin('egg:rhodecode-enterprise-ce#rhodecode')
        plugin_settings = plugin.get_settings()
        plugin_cache_active, cache_ttl = plugin.get_ttl_cache(
            plugin_settings) or (False, 0)
        return plugin_cache_active, cache_ttl
399 399
    def __call__(self, environ, start_response):
        # WSGI entry point: delegate to _handle_request, report any crash
        # as 500, and always drop the DB session at the end of the request
        try:
            return self._handle_request(environ, start_response)
        except Exception:
            log.exception("Exception while handling request")
            appenlight.track_exception(environ)
            return HTTPInternalServerError()(environ, start_response)
        finally:
            meta.Session.remove()
409 409
410 410 def _handle_request(self, environ, start_response):
411 411
412 412 if not self._check_ssl(environ, start_response):
413 413 reason = ('SSL required, while RhodeCode was unable '
414 414 'to detect this as SSL request')
415 415 log.debug('User not allowed to proceed, %s', reason)
416 416 return HTTPNotAcceptable(reason)(environ, start_response)
417 417
418 418 if not self.url_repo_name:
419 419 log.warning('Repository name is empty: %s', self.url_repo_name)
420 420 # failed to get repo name, we fail now
421 421 return HTTPNotFound()(environ, start_response)
422 422 log.debug('Extracted repo name is %s', self.url_repo_name)
423 423
424 424 ip_addr = get_ip_addr(environ)
425 425 user_agent = get_user_agent(environ)
426 426 username = None
427 427
428 428 # skip passing error to error controller
429 429 environ['pylons.status_code_redirect'] = True
430 430
431 431 # ======================================================================
432 432 # GET ACTION PULL or PUSH
433 433 # ======================================================================
434 434 action = self._get_action(environ)
435 435
436 436 # ======================================================================
437 437 # Check if this is a request to a shadow repository of a pull request.
438 438 # In this case only pull action is allowed.
439 439 # ======================================================================
440 440 if self.is_shadow_repo and action != 'pull':
441 441 reason = 'Only pull action is allowed for shadow repositories.'
442 442 log.debug('User not allowed to proceed, %s', reason)
443 443 return HTTPNotAcceptable(reason)(environ, start_response)
444 444
445 445 # Check if the shadow repo actually exists, in case someone refers
446 446 # to it, and it has been deleted because of successful merge.
447 447 if self.is_shadow_repo and not self.is_shadow_repo_dir:
448 448 log.debug(
449 449 'Shadow repo detected, and shadow repo dir `%s` is missing',
450 450 self.is_shadow_repo_dir)
451 451 return HTTPNotFound()(environ, start_response)
452 452
453 453 # ======================================================================
454 454 # CHECK ANONYMOUS PERMISSION
455 455 # ======================================================================
456 456 if action in ['pull', 'push']:
457 457 anonymous_user = User.get_default_user()
458 458 username = anonymous_user.username
459 459 if anonymous_user.active:
460 460 plugin_cache_active, cache_ttl = self._get_default_cache_ttl()
461 461 # ONLY check permissions if the user is activated
462 462 anonymous_perm = self._check_permission(
463 463 action, anonymous_user, self.acl_repo_name, ip_addr,
464 464 plugin_id='anonymous_access',
465 465 plugin_cache_active=plugin_cache_active,
466 466 cache_ttl=cache_ttl,
467 467 )
468 468 else:
469 469 anonymous_perm = False
470 470
471 471 if not anonymous_user.active or not anonymous_perm:
472 472 if not anonymous_user.active:
473 473 log.debug('Anonymous access is disabled, running '
474 474 'authentication')
475 475
476 476 if not anonymous_perm:
477 477 log.debug('Not enough credentials to access this '
478 478 'repository as anonymous user')
479 479
480 480 username = None
481 481 # ==============================================================
482 482 # DEFAULT PERM FAILED OR ANONYMOUS ACCESS IS DISABLED SO WE
483 483 # NEED TO AUTHENTICATE AND ASK FOR AUTH USER PERMISSIONS
484 484 # ==============================================================
485 485
486 486 # try to auth based on environ, container auth methods
487 487 log.debug('Running PRE-AUTH for container based authentication')
488 488 pre_auth = authenticate(
489 489 '', '', environ, VCS_TYPE, registry=self.registry,
490 490 acl_repo_name=self.acl_repo_name)
491 491 if pre_auth and pre_auth.get('username'):
492 492 username = pre_auth['username']
493 493 log.debug('PRE-AUTH got %s as username', username)
494 494 if pre_auth:
495 495 log.debug('PRE-AUTH successful from %s',
496 496 pre_auth.get('auth_data', {}).get('_plugin'))
497 497
498 498 # If not authenticated by the container, running basic auth
499 499 # before inject the calling repo_name for special scope checks
500 500 self.authenticate.acl_repo_name = self.acl_repo_name
501 501
502 502 plugin_cache_active, cache_ttl = False, 0
503 503 plugin = None
504 504 if not username:
505 505 self.authenticate.realm = self.authenticate.get_rc_realm()
506 506
507 507 try:
508 508 auth_result = self.authenticate(environ)
509 509 except (UserCreationError, NotAllowedToCreateUserError) as e:
510 510 log.error(e)
511 511 reason = safe_str(e)
512 512 return HTTPNotAcceptable(reason)(environ, start_response)
513 513
514 514 if isinstance(auth_result, dict):
515 515 AUTH_TYPE.update(environ, 'basic')
516 516 REMOTE_USER.update(environ, auth_result['username'])
517 517 username = auth_result['username']
518 518 plugin = auth_result.get('auth_data', {}).get('_plugin')
519 519 log.info(
520 520 'MAIN-AUTH successful for user `%s` from %s plugin',
521 521 username, plugin)
522 522
523 523 plugin_cache_active, cache_ttl = auth_result.get(
524 524 'auth_data', {}).get('_ttl_cache') or (False, 0)
525 525 else:
526 526 return auth_result.wsgi_application(
527 527 environ, start_response)
528 528
529 529
530 530 # ==============================================================
531 531 # CHECK PERMISSIONS FOR THIS REQUEST USING GIVEN USERNAME
532 532 # ==============================================================
533 533 user = User.get_by_username(username)
534 534 if not self.valid_and_active_user(user):
535 535 return HTTPForbidden()(environ, start_response)
536 536 username = user.username
537 537 user.update_lastactivity()
538 538 meta.Session().commit()
539 539
540 540 # check user attributes for password change flag
541 541 user_obj = user
542 542 if user_obj and user_obj.username != User.DEFAULT_USER and \
543 543 user_obj.user_data.get('force_password_change'):
544 544 reason = 'password change required'
545 545 log.debug('User not allowed to authenticate, %s', reason)
546 546 return HTTPNotAcceptable(reason)(environ, start_response)
547 547
548 548 # check permissions for this repository
549 549 perm = self._check_permission(
550 550 action, user, self.acl_repo_name, ip_addr,
551 551 plugin, plugin_cache_active, cache_ttl)
552 552 if not perm:
553 553 return HTTPForbidden()(environ, start_response)
554 554
555 555 # extras are injected into UI object and later available
556 556 # in hooks executed by RhodeCode
557 557 check_locking = _should_check_locking(environ.get('QUERY_STRING'))
558 558 extras = vcs_operation_context(
559 559 environ, repo_name=self.acl_repo_name, username=username,
560 560 action=action, scm=self.SCM, check_locking=check_locking,
561 561 is_shadow_repo=self.is_shadow_repo
562 562 )
563 563
564 564 # ======================================================================
565 565 # REQUEST HANDLING
566 566 # ======================================================================
567 567 repo_path = os.path.join(
568 568 safe_str(self.base_path), safe_str(self.vcs_repo_name))
569 569 log.debug('Repository path is %s', repo_path)
570 570
571 571 fix_PATH()
572 572
573 573 log.info(
574 574 '%s action on %s repo "%s" by "%s" from %s %s',
575 575 action, self.SCM, safe_str(self.url_repo_name),
576 576 safe_str(username), ip_addr, user_agent)
577 577
578 578 return self._generate_vcs_response(
579 579 environ, start_response, repo_path, extras, action)
580 580
581 581 @initialize_generator
582 582 def _generate_vcs_response(
583 583 self, environ, start_response, repo_path, extras, action):
584 584 """
585 585 Returns a generator for the response content.
586 586
587 587 This method is implemented as a generator, so that it can trigger
588 588 the cache validation after all content sent back to the client. It
589 589 also handles the locking exceptions which will be triggered when
590 590 the first chunk is produced by the underlying WSGI application.
591 591 """
592 592 txn_id = ''
593 593 if 'CONTENT_LENGTH' in environ and environ['REQUEST_METHOD'] == 'MERGE':
594 594 # case for SVN, we want to re-use the callback daemon port
595 595 # so we use the txn_id, for this we peek the body, and still save
596 596 # it as wsgi.input
597 597 data = environ['wsgi.input'].read()
598 598 environ['wsgi.input'] = StringIO(data)
599 599 txn_id = extract_svn_txn_id(self.acl_repo_name, data)
600 600
601 601 callback_daemon, extras = self._prepare_callback_daemon(
602 602 extras, environ, action, txn_id=txn_id)
603 603 log.debug('HOOKS extras is %s', extras)
604 604
605 605 config = self._create_config(extras, self.acl_repo_name)
606 606 app = self._create_wsgi_app(repo_path, self.url_repo_name, config)
607 607 with callback_daemon:
608 608 app.rc_extras = extras
609 609
610 610 try:
611 611 response = app(environ, start_response)
612 612 finally:
613 613 # This statement works together with the decorator
614 614 # "initialize_generator" above. The decorator ensures that
615 615 # we hit the first yield statement before the generator is
616 616 # returned back to the WSGI server. This is needed to
617 617 # ensure that the call to "app" above triggers the
618 618 # needed callback to "start_response" before the
619 619 # generator is actually used.
620 620 yield "__init__"
621 621
622 622 # iter content
623 623 for chunk in response:
624 624 yield chunk
625 625
626 626 try:
627 627 # invalidate cache on push
628 628 if action == 'push':
629 629 self._invalidate_cache(self.url_repo_name)
630 630 finally:
631 631 meta.Session.remove()
632 632
633 633 def _get_repository_name(self, environ):
634 634 """Get repository name out of the environmnent
635 635
636 636 :param environ: WSGI environment
637 637 """
638 638 raise NotImplementedError()
639 639
640 640 def _get_action(self, environ):
641 641 """Map request commands into a pull or push command.
642 642
643 643 :param environ: WSGI environment
644 644 """
645 645 raise NotImplementedError()
646 646
647 647 def _create_wsgi_app(self, repo_path, repo_name, config):
648 648 """Return the WSGI app that will finally handle the request."""
649 649 raise NotImplementedError()
650 650
651 651 def _create_config(self, extras, repo_name):
652 652 """Create a safe config representation."""
653 653 raise NotImplementedError()
654 654
655 655 def _should_use_callback_daemon(self, extras, environ, action):
656 656 return True
657 657
658 658 def _prepare_callback_daemon(self, extras, environ, action, txn_id=None):
659 659 direct_calls = vcs_settings.HOOKS_DIRECT_CALLS
660 660 if not self._should_use_callback_daemon(extras, environ, action):
661 661 # disable callback daemon for actions that don't require it
662 662 direct_calls = True
663 663
664 664 return prepare_callback_daemon(
665 665 extras, protocol=vcs_settings.HOOKS_PROTOCOL,
666 use_direct_calls=direct_calls, txn_id=txn_id)
666 host=vcs_settings.HOOKS_HOST, use_direct_calls=direct_calls, txn_id=txn_id)
667 667
668 668
669 669 def _should_check_locking(query_string):
670 670 # this is kind of hacky, but due to how mercurial handles client-server
671 671 # server see all operation on commit; bookmarks, phases and
672 672 # obsolescence marker in different transaction, we don't want to check
673 673 # locking on those
674 674 return query_string not in ['cmd=listkeys']
@@ -1,66 +1,67 b''
1 1 # -*- coding: utf-8 -*-
2 2
3 3 # Copyright (C) 2014-2018 RhodeCode GmbH
4 4 #
5 5 # This program is free software: you can redistribute it and/or modify
6 6 # it under the terms of the GNU Affero General Public License, version 3
7 7 # (only), as published by the Free Software Foundation.
8 8 #
9 9 # This program is distributed in the hope that it will be useful,
10 10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 12 # GNU General Public License for more details.
13 13 #
14 14 # You should have received a copy of the GNU Affero General Public License
15 15 # along with this program. If not, see <http://www.gnu.org/licenses/>.
16 16 #
17 17 # This program is dual-licensed. If you wish to learn more about the
18 18 # RhodeCode Enterprise Edition, including its added features, Support services,
19 19 # and proprietary license terms, please see https://rhodecode.com/licenses/
20 20
21 21 """
22 22 Internal settings for vcs-lib
23 23 """
24 24
25 25 # list of default encoding used in safe_unicode/safe_str methods
26 26 DEFAULT_ENCODINGS = ['utf8']
27 27
28 28 # Optional arguments to rev-filter, it has to be a list
29 29 # It can also be ['--branches', '--tags']
30 30 GIT_REV_FILTER = ['--all']
31 31
32 32 # Compatibility version when creating SVN repositories. None means newest.
33 33 # Other available options are: pre-1.4-compatible, pre-1.5-compatible,
34 34 # pre-1.6-compatible, pre-1.8-compatible
35 35 SVN_COMPATIBLE_VERSION = None
36 36
37 37 ALIASES = ['hg', 'git', 'svn']
38 38
39 39 BACKENDS = {
40 40 'hg': 'rhodecode.lib.vcs.backends.hg.MercurialRepository',
41 41 'git': 'rhodecode.lib.vcs.backends.git.GitRepository',
42 42 'svn': 'rhodecode.lib.vcs.backends.svn.SubversionRepository',
43 43 }
44 44
45 45 # TODO: Remove once controllers/files.py is adjusted
46 46 ARCHIVE_SPECS = {
47 47 'tbz2': ('application/x-bzip2', '.tar.bz2'),
48 48 'tgz': ('application/x-gzip', '.tar.gz'),
49 49 'zip': ('application/zip', '.zip'),
50 50 }
51 51
52 52 HOOKS_PROTOCOL = None
53 53 HOOKS_DIRECT_CALLS = False
54 HOOKS_HOST = '127.0.0.1'
54 55
55 56
56 57 def available_aliases():
57 58 """
58 59 Mercurial is required for the system to work, so in case vcs.backends does
59 60 not include it, we make sure it will be available internally
60 61 TODO: anderson: refactor vcs.backends so it won't be necessary, VCS server
61 62 should be responsible to dictate available backends.
62 63 """
63 64 aliases = ALIASES[:]
64 65 if 'hg' not in aliases:
65 66 aliases += ['hg']
66 67 return aliases
@@ -1,1700 +1,1701 b''
1 1 # -*- coding: utf-8 -*-
2 2
3 3 # Copyright (C) 2012-2018 RhodeCode GmbH
4 4 #
5 5 # This program is free software: you can redistribute it and/or modify
6 6 # it under the terms of the GNU Affero General Public License, version 3
7 7 # (only), as published by the Free Software Foundation.
8 8 #
9 9 # This program is distributed in the hope that it will be useful,
10 10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 12 # GNU General Public License for more details.
13 13 #
14 14 # You should have received a copy of the GNU Affero General Public License
15 15 # along with this program. If not, see <http://www.gnu.org/licenses/>.
16 16 #
17 17 # This program is dual-licensed. If you wish to learn more about the
18 18 # RhodeCode Enterprise Edition, including its added features, Support services,
19 19 # and proprietary license terms, please see https://rhodecode.com/licenses/
20 20
21 21
22 22 """
23 23 pull request model for RhodeCode
24 24 """
25 25
26 26
27 27 import json
28 28 import logging
29 29 import datetime
30 30 import urllib
31 31 import collections
32 32
33 33 from pyramid.threadlocal import get_current_request
34 34
35 35 from rhodecode import events
36 36 from rhodecode.translation import lazy_ugettext#, _
37 37 from rhodecode.lib import helpers as h, hooks_utils, diffs
38 38 from rhodecode.lib import audit_logger
39 39 from rhodecode.lib.compat import OrderedDict
40 40 from rhodecode.lib.hooks_daemon import prepare_callback_daemon
41 41 from rhodecode.lib.markup_renderer import (
42 42 DEFAULT_COMMENTS_RENDERER, RstTemplateRenderer)
43 43 from rhodecode.lib.utils2 import safe_unicode, safe_str, md5_safe
44 44 from rhodecode.lib.vcs.backends.base import (
45 45 Reference, MergeResponse, MergeFailureReason, UpdateFailureReason)
46 46 from rhodecode.lib.vcs.conf import settings as vcs_settings
47 47 from rhodecode.lib.vcs.exceptions import (
48 48 CommitDoesNotExistError, EmptyRepositoryError)
49 49 from rhodecode.model import BaseModel
50 50 from rhodecode.model.changeset_status import ChangesetStatusModel
51 51 from rhodecode.model.comment import CommentsModel
52 52 from rhodecode.model.db import (
53 53 or_, PullRequest, PullRequestReviewers, ChangesetStatus,
54 54 PullRequestVersion, ChangesetComment, Repository, RepoReviewRule)
55 55 from rhodecode.model.meta import Session
56 56 from rhodecode.model.notification import NotificationModel, \
57 57 EmailNotificationModel
58 58 from rhodecode.model.scm import ScmModel
59 59 from rhodecode.model.settings import VcsSettingsModel
60 60
61 61
62 62 log = logging.getLogger(__name__)
63 63
64 64
65 65 # Data structure to hold the response data when updating commits during a pull
66 66 # request update.
67 67 UpdateResponse = collections.namedtuple('UpdateResponse', [
68 68 'executed', 'reason', 'new', 'old', 'changes',
69 69 'source_changed', 'target_changed'])
70 70
71 71
72 72 class PullRequestModel(BaseModel):
73 73
74 74 cls = PullRequest
75 75
76 76 DIFF_CONTEXT = 3
77 77
78 78 MERGE_STATUS_MESSAGES = {
79 79 MergeFailureReason.NONE: lazy_ugettext(
80 80 'This pull request can be automatically merged.'),
81 81 MergeFailureReason.UNKNOWN: lazy_ugettext(
82 82 'This pull request cannot be merged because of an unhandled'
83 83 ' exception.'),
84 84 MergeFailureReason.MERGE_FAILED: lazy_ugettext(
85 85 'This pull request cannot be merged because of merge conflicts.'),
86 86 MergeFailureReason.PUSH_FAILED: lazy_ugettext(
87 87 'This pull request could not be merged because push to target'
88 88 ' failed.'),
89 89 MergeFailureReason.TARGET_IS_NOT_HEAD: lazy_ugettext(
90 90 'This pull request cannot be merged because the target is not a'
91 91 ' head.'),
92 92 MergeFailureReason.HG_SOURCE_HAS_MORE_BRANCHES: lazy_ugettext(
93 93 'This pull request cannot be merged because the source contains'
94 94 ' more branches than the target.'),
95 95 MergeFailureReason.HG_TARGET_HAS_MULTIPLE_HEADS: lazy_ugettext(
96 96 'This pull request cannot be merged because the target has'
97 97 ' multiple heads.'),
98 98 MergeFailureReason.TARGET_IS_LOCKED: lazy_ugettext(
99 99 'This pull request cannot be merged because the target repository'
100 100 ' is locked.'),
101 101 MergeFailureReason._DEPRECATED_MISSING_COMMIT: lazy_ugettext(
102 102 'This pull request cannot be merged because the target or the '
103 103 'source reference is missing.'),
104 104 MergeFailureReason.MISSING_TARGET_REF: lazy_ugettext(
105 105 'This pull request cannot be merged because the target '
106 106 'reference is missing.'),
107 107 MergeFailureReason.MISSING_SOURCE_REF: lazy_ugettext(
108 108 'This pull request cannot be merged because the source '
109 109 'reference is missing.'),
110 110 MergeFailureReason.SUBREPO_MERGE_FAILED: lazy_ugettext(
111 111 'This pull request cannot be merged because of conflicts related '
112 112 'to sub repositories.'),
113 113 }
114 114
115 115 UPDATE_STATUS_MESSAGES = {
116 116 UpdateFailureReason.NONE: lazy_ugettext(
117 117 'Pull request update successful.'),
118 118 UpdateFailureReason.UNKNOWN: lazy_ugettext(
119 119 'Pull request update failed because of an unknown error.'),
120 120 UpdateFailureReason.NO_CHANGE: lazy_ugettext(
121 121 'No update needed because the source and target have not changed.'),
122 122 UpdateFailureReason.WRONG_REF_TYPE: lazy_ugettext(
123 123 'Pull request cannot be updated because the reference type is '
124 124 'not supported for an update. Only Branch, Tag or Bookmark is allowed.'),
125 125 UpdateFailureReason.MISSING_TARGET_REF: lazy_ugettext(
126 126 'This pull request cannot be updated because the target '
127 127 'reference is missing.'),
128 128 UpdateFailureReason.MISSING_SOURCE_REF: lazy_ugettext(
129 129 'This pull request cannot be updated because the source '
130 130 'reference is missing.'),
131 131 }
132 132
133 133 def __get_pull_request(self, pull_request):
134 134 return self._get_instance((
135 135 PullRequest, PullRequestVersion), pull_request)
136 136
137 137 def _check_perms(self, perms, pull_request, user, api=False):
138 138 if not api:
139 139 return h.HasRepoPermissionAny(*perms)(
140 140 user=user, repo_name=pull_request.target_repo.repo_name)
141 141 else:
142 142 return h.HasRepoPermissionAnyApi(*perms)(
143 143 user=user, repo_name=pull_request.target_repo.repo_name)
144 144
145 145 def check_user_read(self, pull_request, user, api=False):
146 146 _perms = ('repository.admin', 'repository.write', 'repository.read',)
147 147 return self._check_perms(_perms, pull_request, user, api)
148 148
149 149 def check_user_merge(self, pull_request, user, api=False):
150 150 _perms = ('repository.admin', 'repository.write', 'hg.admin',)
151 151 return self._check_perms(_perms, pull_request, user, api)
152 152
153 153 def check_user_update(self, pull_request, user, api=False):
154 154 owner = user.user_id == pull_request.user_id
155 155 return self.check_user_merge(pull_request, user, api) or owner
156 156
157 157 def check_user_delete(self, pull_request, user):
158 158 owner = user.user_id == pull_request.user_id
159 159 _perms = ('repository.admin',)
160 160 return self._check_perms(_perms, pull_request, user) or owner
161 161
162 162 def check_user_change_status(self, pull_request, user, api=False):
163 163 reviewer = user.user_id in [x.user_id for x in
164 164 pull_request.reviewers]
165 165 return self.check_user_update(pull_request, user, api) or reviewer
166 166
167 167 def check_user_comment(self, pull_request, user):
168 168 owner = user.user_id == pull_request.user_id
169 169 return self.check_user_read(pull_request, user) or owner
170 170
171 171 def get(self, pull_request):
172 172 return self.__get_pull_request(pull_request)
173 173
174 174 def _prepare_get_all_query(self, repo_name, source=False, statuses=None,
175 175 opened_by=None, order_by=None,
176 176 order_dir='desc'):
177 177 repo = None
178 178 if repo_name:
179 179 repo = self._get_repo(repo_name)
180 180
181 181 q = PullRequest.query()
182 182
183 183 # source or target
184 184 if repo and source:
185 185 q = q.filter(PullRequest.source_repo == repo)
186 186 elif repo:
187 187 q = q.filter(PullRequest.target_repo == repo)
188 188
189 189 # closed,opened
190 190 if statuses:
191 191 q = q.filter(PullRequest.status.in_(statuses))
192 192
193 193 # opened by filter
194 194 if opened_by:
195 195 q = q.filter(PullRequest.user_id.in_(opened_by))
196 196
197 197 if order_by:
198 198 order_map = {
199 199 'name_raw': PullRequest.pull_request_id,
200 200 'title': PullRequest.title,
201 201 'updated_on_raw': PullRequest.updated_on,
202 202 'target_repo': PullRequest.target_repo_id
203 203 }
204 204 if order_dir == 'asc':
205 205 q = q.order_by(order_map[order_by].asc())
206 206 else:
207 207 q = q.order_by(order_map[order_by].desc())
208 208
209 209 return q
210 210
211 211 def count_all(self, repo_name, source=False, statuses=None,
212 212 opened_by=None):
213 213 """
214 214 Count the number of pull requests for a specific repository.
215 215
216 216 :param repo_name: target or source repo
217 217 :param source: boolean flag to specify if repo_name refers to source
218 218 :param statuses: list of pull request statuses
219 219 :param opened_by: author user of the pull request
220 220 :returns: int number of pull requests
221 221 """
222 222 q = self._prepare_get_all_query(
223 223 repo_name, source=source, statuses=statuses, opened_by=opened_by)
224 224
225 225 return q.count()
226 226
227 227 def get_all(self, repo_name, source=False, statuses=None, opened_by=None,
228 228 offset=0, length=None, order_by=None, order_dir='desc'):
229 229 """
230 230 Get all pull requests for a specific repository.
231 231
232 232 :param repo_name: target or source repo
233 233 :param source: boolean flag to specify if repo_name refers to source
234 234 :param statuses: list of pull request statuses
235 235 :param opened_by: author user of the pull request
236 236 :param offset: pagination offset
237 237 :param length: length of returned list
238 238 :param order_by: order of the returned list
239 239 :param order_dir: 'asc' or 'desc' ordering direction
240 240 :returns: list of pull requests
241 241 """
242 242 q = self._prepare_get_all_query(
243 243 repo_name, source=source, statuses=statuses, opened_by=opened_by,
244 244 order_by=order_by, order_dir=order_dir)
245 245
246 246 if length:
247 247 pull_requests = q.limit(length).offset(offset).all()
248 248 else:
249 249 pull_requests = q.all()
250 250
251 251 return pull_requests
252 252
253 253 def count_awaiting_review(self, repo_name, source=False, statuses=None,
254 254 opened_by=None):
255 255 """
256 256 Count the number of pull requests for a specific repository that are
257 257 awaiting review.
258 258
259 259 :param repo_name: target or source repo
260 260 :param source: boolean flag to specify if repo_name refers to source
261 261 :param statuses: list of pull request statuses
262 262 :param opened_by: author user of the pull request
263 263 :returns: int number of pull requests
264 264 """
265 265 pull_requests = self.get_awaiting_review(
266 266 repo_name, source=source, statuses=statuses, opened_by=opened_by)
267 267
268 268 return len(pull_requests)
269 269
270 270 def get_awaiting_review(self, repo_name, source=False, statuses=None,
271 271 opened_by=None, offset=0, length=None,
272 272 order_by=None, order_dir='desc'):
273 273 """
274 274 Get all pull requests for a specific repository that are awaiting
275 275 review.
276 276
277 277 :param repo_name: target or source repo
278 278 :param source: boolean flag to specify if repo_name refers to source
279 279 :param statuses: list of pull request statuses
280 280 :param opened_by: author user of the pull request
281 281 :param offset: pagination offset
282 282 :param length: length of returned list
283 283 :param order_by: order of the returned list
284 284 :param order_dir: 'asc' or 'desc' ordering direction
285 285 :returns: list of pull requests
286 286 """
287 287 pull_requests = self.get_all(
288 288 repo_name, source=source, statuses=statuses, opened_by=opened_by,
289 289 order_by=order_by, order_dir=order_dir)
290 290
291 291 _filtered_pull_requests = []
292 292 for pr in pull_requests:
293 293 status = pr.calculated_review_status()
294 294 if status in [ChangesetStatus.STATUS_NOT_REVIEWED,
295 295 ChangesetStatus.STATUS_UNDER_REVIEW]:
296 296 _filtered_pull_requests.append(pr)
297 297 if length:
298 298 return _filtered_pull_requests[offset:offset+length]
299 299 else:
300 300 return _filtered_pull_requests
301 301
302 302 def count_awaiting_my_review(self, repo_name, source=False, statuses=None,
303 303 opened_by=None, user_id=None):
304 304 """
305 305 Count the number of pull requests for a specific repository that are
306 306 awaiting review from a specific user.
307 307
308 308 :param repo_name: target or source repo
309 309 :param source: boolean flag to specify if repo_name refers to source
310 310 :param statuses: list of pull request statuses
311 311 :param opened_by: author user of the pull request
312 312 :param user_id: reviewer user of the pull request
313 313 :returns: int number of pull requests
314 314 """
315 315 pull_requests = self.get_awaiting_my_review(
316 316 repo_name, source=source, statuses=statuses, opened_by=opened_by,
317 317 user_id=user_id)
318 318
319 319 return len(pull_requests)
320 320
321 321 def get_awaiting_my_review(self, repo_name, source=False, statuses=None,
322 322 opened_by=None, user_id=None, offset=0,
323 323 length=None, order_by=None, order_dir='desc'):
324 324 """
325 325 Get all pull requests for a specific repository that are awaiting
326 326 review from a specific user.
327 327
328 328 :param repo_name: target or source repo
329 329 :param source: boolean flag to specify if repo_name refers to source
330 330 :param statuses: list of pull request statuses
331 331 :param opened_by: author user of the pull request
332 332 :param user_id: reviewer user of the pull request
333 333 :param offset: pagination offset
334 334 :param length: length of returned list
335 335 :param order_by: order of the returned list
336 336 :param order_dir: 'asc' or 'desc' ordering direction
337 337 :returns: list of pull requests
338 338 """
339 339 pull_requests = self.get_all(
340 340 repo_name, source=source, statuses=statuses, opened_by=opened_by,
341 341 order_by=order_by, order_dir=order_dir)
342 342
343 343 _my = PullRequestModel().get_not_reviewed(user_id)
344 344 my_participation = []
345 345 for pr in pull_requests:
346 346 if pr in _my:
347 347 my_participation.append(pr)
348 348 _filtered_pull_requests = my_participation
349 349 if length:
350 350 return _filtered_pull_requests[offset:offset+length]
351 351 else:
352 352 return _filtered_pull_requests
353 353
354 354 def get_not_reviewed(self, user_id):
355 355 return [
356 356 x.pull_request for x in PullRequestReviewers.query().filter(
357 357 PullRequestReviewers.user_id == user_id).all()
358 358 ]
359 359
360 360 def _prepare_participating_query(self, user_id=None, statuses=None,
361 361 order_by=None, order_dir='desc'):
362 362 q = PullRequest.query()
363 363 if user_id:
364 364 reviewers_subquery = Session().query(
365 365 PullRequestReviewers.pull_request_id).filter(
366 366 PullRequestReviewers.user_id == user_id).subquery()
367 367 user_filter = or_(
368 368 PullRequest.user_id == user_id,
369 369 PullRequest.pull_request_id.in_(reviewers_subquery)
370 370 )
371 371 q = PullRequest.query().filter(user_filter)
372 372
373 373 # closed,opened
374 374 if statuses:
375 375 q = q.filter(PullRequest.status.in_(statuses))
376 376
377 377 if order_by:
378 378 order_map = {
379 379 'name_raw': PullRequest.pull_request_id,
380 380 'title': PullRequest.title,
381 381 'updated_on_raw': PullRequest.updated_on,
382 382 'target_repo': PullRequest.target_repo_id
383 383 }
384 384 if order_dir == 'asc':
385 385 q = q.order_by(order_map[order_by].asc())
386 386 else:
387 387 q = q.order_by(order_map[order_by].desc())
388 388
389 389 return q
390 390
391 391 def count_im_participating_in(self, user_id=None, statuses=None):
392 392 q = self._prepare_participating_query(user_id, statuses=statuses)
393 393 return q.count()
394 394
395 395 def get_im_participating_in(
396 396 self, user_id=None, statuses=None, offset=0,
397 397 length=None, order_by=None, order_dir='desc'):
398 398 """
399 399 Get all Pull requests that i'm participating in, or i have opened
400 400 """
401 401
402 402 q = self._prepare_participating_query(
403 403 user_id, statuses=statuses, order_by=order_by,
404 404 order_dir=order_dir)
405 405
406 406 if length:
407 407 pull_requests = q.limit(length).offset(offset).all()
408 408 else:
409 409 pull_requests = q.all()
410 410
411 411 return pull_requests
412 412
413 413 def get_versions(self, pull_request):
414 414 """
415 415 returns version of pull request sorted by ID descending
416 416 """
417 417 return PullRequestVersion.query()\
418 418 .filter(PullRequestVersion.pull_request == pull_request)\
419 419 .order_by(PullRequestVersion.pull_request_version_id.asc())\
420 420 .all()
421 421
422 422 def get_pr_version(self, pull_request_id, version=None):
423 423 at_version = None
424 424
425 425 if version and version == 'latest':
426 426 pull_request_ver = PullRequest.get(pull_request_id)
427 427 pull_request_obj = pull_request_ver
428 428 _org_pull_request_obj = pull_request_obj
429 429 at_version = 'latest'
430 430 elif version:
431 431 pull_request_ver = PullRequestVersion.get_or_404(version)
432 432 pull_request_obj = pull_request_ver
433 433 _org_pull_request_obj = pull_request_ver.pull_request
434 434 at_version = pull_request_ver.pull_request_version_id
435 435 else:
436 436 _org_pull_request_obj = pull_request_obj = PullRequest.get_or_404(
437 437 pull_request_id)
438 438
439 439 pull_request_display_obj = PullRequest.get_pr_display_object(
440 440 pull_request_obj, _org_pull_request_obj)
441 441
442 442 return _org_pull_request_obj, pull_request_obj, \
443 443 pull_request_display_obj, at_version
444 444
445 445 def create(self, created_by, source_repo, source_ref, target_repo,
446 446 target_ref, revisions, reviewers, title, description=None,
447 447 reviewer_data=None, translator=None, auth_user=None):
448 448 translator = translator or get_current_request().translate
449 449
450 450 created_by_user = self._get_user(created_by)
451 451 auth_user = auth_user or created_by_user
452 452 source_repo = self._get_repo(source_repo)
453 453 target_repo = self._get_repo(target_repo)
454 454
455 455 pull_request = PullRequest()
456 456 pull_request.source_repo = source_repo
457 457 pull_request.source_ref = source_ref
458 458 pull_request.target_repo = target_repo
459 459 pull_request.target_ref = target_ref
460 460 pull_request.revisions = revisions
461 461 pull_request.title = title
462 462 pull_request.description = description
463 463 pull_request.author = created_by_user
464 464 pull_request.reviewer_data = reviewer_data
465 465
466 466 Session().add(pull_request)
467 467 Session().flush()
468 468
469 469 reviewer_ids = set()
470 470 # members / reviewers
471 471 for reviewer_object in reviewers:
472 472 user_id, reasons, mandatory, rules = reviewer_object
473 473 user = self._get_user(user_id)
474 474
475 475 # skip duplicates
476 476 if user.user_id in reviewer_ids:
477 477 continue
478 478
479 479 reviewer_ids.add(user.user_id)
480 480
481 481 reviewer = PullRequestReviewers()
482 482 reviewer.user = user
483 483 reviewer.pull_request = pull_request
484 484 reviewer.reasons = reasons
485 485 reviewer.mandatory = mandatory
486 486
487 487 # NOTE(marcink): pick only first rule for now
488 488 rule_id = rules[0] if rules else None
489 489 rule = RepoReviewRule.get(rule_id) if rule_id else None
490 490 if rule:
491 491 review_group = rule.user_group_vote_rule()
492 492 if review_group:
493 493 # NOTE(marcink):
494 494 # again, can be that user is member of more,
495 495 # but we pick the first same, as default reviewers algo
496 496 review_group = review_group[0]
497 497
498 498 rule_data = {
499 499 'rule_name':
500 500 rule.review_rule_name,
501 501 'rule_user_group_entry_id':
502 502 review_group.repo_review_rule_users_group_id,
503 503 'rule_user_group_name':
504 504 review_group.users_group.users_group_name,
505 505 'rule_user_group_members':
506 506 [x.user.username for x in review_group.users_group.members],
507 507 }
508 508 # e.g {'vote_rule': -1, 'mandatory': True}
509 509 rule_data.update(review_group.rule_data())
510 510
511 511 reviewer.rule_data = rule_data
512 512
513 513 Session().add(reviewer)
514 514 Session().flush()
515 515
516 516 # Set approval status to "Under Review" for all commits which are
517 517 # part of this pull request.
518 518 ChangesetStatusModel().set_status(
519 519 repo=target_repo,
520 520 status=ChangesetStatus.STATUS_UNDER_REVIEW,
521 521 user=created_by_user,
522 522 pull_request=pull_request
523 523 )
524 524 # we commit early at this point. This has to do with a fact
525 525 # that before queries do some row-locking. And because of that
526 526 # we need to commit and finish transation before below validate call
527 527 # that for large repos could be long resulting in long row locks
528 528 Session().commit()
529 529
530 530 # prepare workspace, and run initial merge simulation
531 531 MergeCheck.validate(
532 532 pull_request, user=created_by_user, translator=translator)
533 533
534 534 self.notify_reviewers(pull_request, reviewer_ids)
535 535 self._trigger_pull_request_hook(
536 536 pull_request, created_by_user, 'create')
537 537
538 538 creation_data = pull_request.get_api_data(with_merge_state=False)
539 539 self._log_audit_action(
540 540 'repo.pull_request.create', {'data': creation_data},
541 541 auth_user, pull_request)
542 542
543 543 return pull_request
544 544
545 545 def _trigger_pull_request_hook(self, pull_request, user, action):
546 546 pull_request = self.__get_pull_request(pull_request)
547 547 target_scm = pull_request.target_repo.scm_instance()
548 548 if action == 'create':
549 549 trigger_hook = hooks_utils.trigger_log_create_pull_request_hook
550 550 elif action == 'merge':
551 551 trigger_hook = hooks_utils.trigger_log_merge_pull_request_hook
552 552 elif action == 'close':
553 553 trigger_hook = hooks_utils.trigger_log_close_pull_request_hook
554 554 elif action == 'review_status_change':
555 555 trigger_hook = hooks_utils.trigger_log_review_pull_request_hook
556 556 elif action == 'update':
557 557 trigger_hook = hooks_utils.trigger_log_update_pull_request_hook
558 558 else:
559 559 return
560 560
561 561 trigger_hook(
562 562 username=user.username,
563 563 repo_name=pull_request.target_repo.repo_name,
564 564 repo_alias=target_scm.alias,
565 565 pull_request=pull_request)
566 566
567 567 def _get_commit_ids(self, pull_request):
568 568 """
569 569 Return the commit ids of the merged pull request.
570 570
571 571 This method is not dealing correctly yet with the lack of autoupdates
572 572 nor with the implicit target updates.
573 573 For example: if a commit in the source repo is already in the target it
574 574 will be reported anyways.
575 575 """
576 576 merge_rev = pull_request.merge_rev
577 577 if merge_rev is None:
578 578 raise ValueError('This pull request was not merged yet')
579 579
580 580 commit_ids = list(pull_request.revisions)
581 581 if merge_rev not in commit_ids:
582 582 commit_ids.append(merge_rev)
583 583
584 584 return commit_ids
585 585
586 586 def merge_repo(self, pull_request, user, extras):
587 587 log.debug("Merging pull request %s", pull_request.pull_request_id)
588 588 merge_state = self._merge_pull_request(pull_request, user, extras)
589 589 if merge_state.executed:
590 590 log.debug(
591 591 "Merge was successful, updating the pull request comments.")
592 592 self._comment_and_close_pr(pull_request, user, merge_state)
593 593
594 594 self._log_audit_action(
595 595 'repo.pull_request.merge',
596 596 {'merge_state': merge_state.__dict__},
597 597 user, pull_request)
598 598
599 599 else:
600 600 log.warn("Merge failed, not updating the pull request.")
601 601 return merge_state
602 602
    def _merge_pull_request(self, pull_request, user, extras, merge_msg=None):
        """
        Run the actual VCS merge of the pull request inside a hooks
        callback daemon context and return the resulting merge state.

        :param merge_msg: optional merge commit message; a default one is
            built from the pull request metadata when not given.
        """
        target_vcs = pull_request.target_repo.scm_instance()
        source_vcs = pull_request.source_repo.scm_instance()
        # make sure we merge against the latest commit of the target ref
        target_ref = self._refresh_reference(
            pull_request.target_ref_parts, target_vcs)

        message = merge_msg or (
            'Merge pull request #%(pr_id)s from '
            '%(source_repo)s %(source_ref_name)s\n\n %(pr_title)s') % {
            'pr_id': pull_request.pull_request_id,
            'source_repo': source_vcs.name,
            'source_ref_name': pull_request.source_ref_parts.name,
            'pr_title': pull_request.title
        }

        workspace_id = self._workspace_id(pull_request)
        repo_id = pull_request.target_repo.repo_id
        use_rebase = self._use_rebase_for_merging(pull_request)
        close_branch = self._close_branch_before_merging(pull_request)

        # spin up the hooks callback daemon so that vcsserver-side hooks
        # can call back into this process during the merge; the callback
        # host is configurable via vcs_settings.HOOKS_HOST
        callback_daemon, extras = prepare_callback_daemon(
            extras, protocol=vcs_settings.HOOKS_PROTOCOL,
            host=vcs_settings.HOOKS_HOST,
            use_direct_calls=vcs_settings.HOOKS_DIRECT_CALLS)

        with callback_daemon:
            # TODO: johbo: Implement a clean way to run a config_override
            # for a single call.
            target_vcs.config.set(
                'rhodecode', 'RC_SCM_DATA', json.dumps(extras))
            merge_state = target_vcs.merge(
                repo_id, workspace_id, target_ref, source_vcs,
                pull_request.source_ref_parts,
                user_name=user.username, user_email=user.email,
                message=message, use_rebase=use_rebase,
                close_branch=close_branch)
        return merge_state
639 640
    def _comment_and_close_pr(self, pull_request, user, merge_state, close_msg=None):
        """
        Post-merge bookkeeping: record the merge revision, leave a closing
        comment on the pull request, invalidate target repo caches and
        fire the 'merge' hook.
        """
        pull_request.merge_rev = merge_state.merge_ref.commit_id
        pull_request.updated_on = datetime.datetime.now()
        close_msg = close_msg or 'Pull request merged and closed'

        CommentsModel().create(
            text=safe_unicode(close_msg),
            repo=pull_request.target_repo.repo_id,
            user=user.user_id,
            pull_request=pull_request.pull_request_id,
            f_path=None,
            line_no=None,
            closing_pr=True
        )

        Session().add(pull_request)
        Session().flush()
        # TODO: paris: replace invalidation with less radical solution
        ScmModel().mark_for_invalidation(
            pull_request.target_repo.repo_name)
        self._trigger_pull_request_hook(pull_request, user, 'merge')
661 662
662 663 def has_valid_update_type(self, pull_request):
663 664 source_ref_type = pull_request.source_ref_parts.type
664 665 return source_ref_type in ['book', 'branch', 'tag']
665 666
    def update_commits(self, pull_request):
        """
        Get the updated list of commits for the pull request
        and return the new pull request version and the list
        of commits processed by this update action

        :param pull_request: `PullRequest` instance or id to refresh
        :return: `UpdateResponse` describing whether the update ran and
            what changed on the source/target side
        """
        pull_request = self.__get_pull_request(pull_request)
        source_ref_type = pull_request.source_ref_parts.type
        source_ref_name = pull_request.source_ref_parts.name
        source_ref_id = pull_request.source_ref_parts.commit_id

        target_ref_type = pull_request.target_ref_parts.type
        target_ref_name = pull_request.target_ref_parts.name
        target_ref_id = pull_request.target_ref_parts.commit_id

        # only bookmark/branch/tag sources can be followed for updates
        if not self.has_valid_update_type(pull_request):
            log.debug(
                "Skipping update of pull request %s due to ref type: %s",
                pull_request, source_ref_type)
            return UpdateResponse(
                executed=False,
                reason=UpdateFailureReason.WRONG_REF_TYPE,
                old=pull_request, new=None, changes=None,
                source_changed=False, target_changed=False)

        # source repo
        source_repo = pull_request.source_repo.scm_instance()
        try:
            source_commit = source_repo.get_commit(commit_id=source_ref_name)
        except CommitDoesNotExistError:
            return UpdateResponse(
                executed=False,
                reason=UpdateFailureReason.MISSING_SOURCE_REF,
                old=pull_request, new=None, changes=None,
                source_changed=False, target_changed=False)

        source_changed = source_ref_id != source_commit.raw_id

        # target repo
        target_repo = pull_request.target_repo.scm_instance()
        try:
            target_commit = target_repo.get_commit(commit_id=target_ref_name)
        except CommitDoesNotExistError:
            return UpdateResponse(
                executed=False,
                reason=UpdateFailureReason.MISSING_TARGET_REF,
                old=pull_request, new=None, changes=None,
                source_changed=False, target_changed=False)
        target_changed = target_ref_id != target_commit.raw_id

        if not (source_changed or target_changed):
            log.debug("Nothing changed in pull request %s", pull_request)
            # NOTE(review): the source_changed/target_changed keyword
            # values below look swapped; both are False in this branch so
            # the result is unaffected, but this should be cleaned up.
            return UpdateResponse(
                executed=False,
                reason=UpdateFailureReason.NO_CHANGE,
                old=pull_request, new=None, changes=None,
                source_changed=target_changed, target_changed=source_changed)

        change_in_found = 'target repo' if target_changed else 'source repo'
        log.debug('Updating pull request because of change in %s detected',
                  change_in_found)

        # Finally there is a need for an update, in case of source change
        # we create a new version, else just an update
        if source_changed:
            pull_request_version = self._create_version_from_snapshot(pull_request)
            self._link_comments_to_version(pull_request_version)
        else:
            try:
                ver = pull_request.versions[-1]
            except IndexError:
                ver = None

            pull_request.pull_request_version_id = \
                ver.pull_request_version_id if ver else None
            pull_request_version = pull_request

        # resolve the target again; named refs by name, otherwise by id
        try:
            if target_ref_type in ('tag', 'branch', 'book'):
                target_commit = target_repo.get_commit(target_ref_name)
            else:
                target_commit = target_repo.get_commit(target_ref_id)
        except CommitDoesNotExistError:
            return UpdateResponse(
                executed=False,
                reason=UpdateFailureReason.MISSING_TARGET_REF,
                old=pull_request, new=None, changes=None,
                source_changed=source_changed, target_changed=target_changed)

        # re-compute commit ids
        old_commit_ids = pull_request.revisions
        pre_load = ["author", "branch", "date", "message"]
        commit_ranges = target_repo.compare(
            target_commit.raw_id, source_commit.raw_id, source_repo, merge=True,
            pre_load=pre_load)

        ancestor = target_repo.get_common_ancestor(
            target_commit.raw_id, source_commit.raw_id, source_repo)

        # refs are stored as "type:name:commit_id" strings
        pull_request.source_ref = '%s:%s:%s' % (
            source_ref_type, source_ref_name, source_commit.raw_id)
        pull_request.target_ref = '%s:%s:%s' % (
            target_ref_type, target_ref_name, ancestor)

        pull_request.revisions = [
            commit.raw_id for commit in reversed(commit_ranges)]
        pull_request.updated_on = datetime.datetime.now()
        Session().add(pull_request)
        new_commit_ids = pull_request.revisions

        # diff the old and the new state to detect file-level changes
        old_diff_data, new_diff_data = self._generate_update_diffs(
            pull_request, pull_request_version)

        # calculate commit and file changes
        changes = self._calculate_commit_id_changes(
            old_commit_ids, new_commit_ids)
        file_changes = self._calculate_file_changes(
            old_diff_data, new_diff_data)

        # set comments as outdated if DIFFS changed
        CommentsModel().outdate_comments(
            pull_request, old_diff_data=old_diff_data,
            new_diff_data=new_diff_data)

        commit_changes = (changes.added or changes.removed)
        file_node_changes = (
            file_changes.added or file_changes.modified or file_changes.removed)
        pr_has_changes = commit_changes or file_node_changes

        # Add an automatic comment to the pull request, in case
        # anything has changed
        if pr_has_changes:
            update_comment = CommentsModel().create(
                text=self._render_update_message(changes, file_changes),
                repo=pull_request.target_repo,
                user=pull_request.author,
                pull_request=pull_request,
                send_email=False, renderer=DEFAULT_COMMENTS_RENDERER)

            # Update status to "Under Review" for added commits
            for commit_id in changes.added:
                ChangesetStatusModel().set_status(
                    repo=pull_request.source_repo,
                    status=ChangesetStatus.STATUS_UNDER_REVIEW,
                    comment=update_comment,
                    user=pull_request.author,
                    pull_request=pull_request,
                    revision=commit_id)

        log.debug(
            'Updated pull request %s, added_ids: %s, common_ids: %s, '
            'removed_ids: %s', pull_request.pull_request_id,
            changes.added, changes.common, changes.removed)
        log.debug(
            'Updated pull request with the following file changes: %s',
            file_changes)

        log.info(
            "Updated pull request %s from commit %s to commit %s, "
            "stored new version %s of this pull request.",
            pull_request.pull_request_id, source_ref_id,
            pull_request.source_ref_parts.commit_id,
            pull_request_version.pull_request_version_id)
        Session().commit()
        self._trigger_pull_request_hook(
            pull_request, pull_request.author, 'update')

        return UpdateResponse(
            executed=True, reason=UpdateFailureReason.NONE,
            old=pull_request, new=pull_request_version, changes=changes,
            source_changed=source_changed, target_changed=target_changed)
837 838
838 839 def _create_version_from_snapshot(self, pull_request):
839 840 version = PullRequestVersion()
840 841 version.title = pull_request.title
841 842 version.description = pull_request.description
842 843 version.status = pull_request.status
843 844 version.created_on = datetime.datetime.now()
844 845 version.updated_on = pull_request.updated_on
845 846 version.user_id = pull_request.user_id
846 847 version.source_repo = pull_request.source_repo
847 848 version.source_ref = pull_request.source_ref
848 849 version.target_repo = pull_request.target_repo
849 850 version.target_ref = pull_request.target_ref
850 851
851 852 version._last_merge_source_rev = pull_request._last_merge_source_rev
852 853 version._last_merge_target_rev = pull_request._last_merge_target_rev
853 854 version.last_merge_status = pull_request.last_merge_status
854 855 version.shadow_merge_ref = pull_request.shadow_merge_ref
855 856 version.merge_rev = pull_request.merge_rev
856 857 version.reviewer_data = pull_request.reviewer_data
857 858
858 859 version.revisions = pull_request.revisions
859 860 version.pull_request = pull_request
860 861 Session().add(version)
861 862 Session().flush()
862 863
863 864 return version
864 865
865 866 def _generate_update_diffs(self, pull_request, pull_request_version):
866 867
867 868 diff_context = (
868 869 self.DIFF_CONTEXT +
869 870 CommentsModel.needed_extra_diff_context())
870 871
871 872 source_repo = pull_request_version.source_repo
872 873 source_ref_id = pull_request_version.source_ref_parts.commit_id
873 874 target_ref_id = pull_request_version.target_ref_parts.commit_id
874 875 old_diff = self._get_diff_from_pr_or_version(
875 876 source_repo, source_ref_id, target_ref_id, context=diff_context)
876 877
877 878 source_repo = pull_request.source_repo
878 879 source_ref_id = pull_request.source_ref_parts.commit_id
879 880 target_ref_id = pull_request.target_ref_parts.commit_id
880 881
881 882 new_diff = self._get_diff_from_pr_or_version(
882 883 source_repo, source_ref_id, target_ref_id, context=diff_context)
883 884
884 885 old_diff_data = diffs.DiffProcessor(old_diff)
885 886 old_diff_data.prepare()
886 887 new_diff_data = diffs.DiffProcessor(new_diff)
887 888 new_diff_data.prepare()
888 889
889 890 return old_diff_data, new_diff_data
890 891
    def _link_comments_to_version(self, pull_request_version):
        """
        Link all unlinked comments of this pull request to the given version.

        :param pull_request_version: The `PullRequestVersion` to which
            the comments shall be linked.

        """
        pull_request = pull_request_version.pull_request
        comments = ChangesetComment.query()\
            .filter(
                # TODO: johbo: Should we query for the repo at all here?
                # Pending decision on how comments of PRs are to be related
                # to either the source repo, the target repo or no repo at all.
                ChangesetComment.repo_id == pull_request.target_repo.repo_id,
                ChangesetComment.pull_request == pull_request,
                # `== None` is the SQLAlchemy IS NULL idiom; do not replace
                # it with `is None`
                ChangesetComment.pull_request_version == None)\
            .order_by(ChangesetComment.comment_id.asc())

        # TODO: johbo: Find out why this breaks if it is done in a bulk
        # operation.
        for comment in comments:
            comment.pull_request_version_id = (
                pull_request_version.pull_request_version_id)
            Session().add(comment)
916 917
917 918 def _calculate_commit_id_changes(self, old_ids, new_ids):
918 919 added = [x for x in new_ids if x not in old_ids]
919 920 common = [x for x in new_ids if x in old_ids]
920 921 removed = [x for x in old_ids if x not in new_ids]
921 922 total = new_ids
922 923 return ChangeTuple(added, common, removed, total)
923 924
924 925 def _calculate_file_changes(self, old_diff_data, new_diff_data):
925 926
926 927 old_files = OrderedDict()
927 928 for diff_data in old_diff_data.parsed_diff:
928 929 old_files[diff_data['filename']] = md5_safe(diff_data['raw_diff'])
929 930
930 931 added_files = []
931 932 modified_files = []
932 933 removed_files = []
933 934 for diff_data in new_diff_data.parsed_diff:
934 935 new_filename = diff_data['filename']
935 936 new_hash = md5_safe(diff_data['raw_diff'])
936 937
937 938 old_hash = old_files.get(new_filename)
938 939 if not old_hash:
939 940 # file is not present in old diff, means it's added
940 941 added_files.append(new_filename)
941 942 else:
942 943 if new_hash != old_hash:
943 944 modified_files.append(new_filename)
944 945 # now remove a file from old, since we have seen it already
945 946 del old_files[new_filename]
946 947
947 948 # removed files is when there are present in old, but not in NEW,
948 949 # since we remove old files that are present in new diff, left-overs
949 950 # if any should be the removed files
950 951 removed_files.extend(old_files.keys())
951 952
952 953 return FileChangeTuple(added_files, modified_files, removed_files)
953 954
954 955 def _render_update_message(self, changes, file_changes):
955 956 """
956 957 render the message using DEFAULT_COMMENTS_RENDERER (RST renderer),
957 958 so it's always looking the same disregarding on which default
958 959 renderer system is using.
959 960
960 961 :param changes: changes named tuple
961 962 :param file_changes: file changes named tuple
962 963
963 964 """
964 965 new_status = ChangesetStatus.get_status_lbl(
965 966 ChangesetStatus.STATUS_UNDER_REVIEW)
966 967
967 968 changed_files = (
968 969 file_changes.added + file_changes.modified + file_changes.removed)
969 970
970 971 params = {
971 972 'under_review_label': new_status,
972 973 'added_commits': changes.added,
973 974 'removed_commits': changes.removed,
974 975 'changed_files': changed_files,
975 976 'added_files': file_changes.added,
976 977 'modified_files': file_changes.modified,
977 978 'removed_files': file_changes.removed,
978 979 }
979 980 renderer = RstTemplateRenderer()
980 981 return renderer.render('pull_request_update.mako', **params)
981 982
    def edit(self, pull_request, title, description, user):
        """
        Update title/description of an open pull request and audit-log it.

        Note: the title is only replaced when a non-empty value is given,
        while the description is always overwritten.

        :raises ValueError: when the pull request is already closed
        """
        pull_request = self.__get_pull_request(pull_request)
        old_data = pull_request.get_api_data(with_merge_state=False)
        if pull_request.is_closed():
            raise ValueError('This pull request is closed')
        if title:
            pull_request.title = title
        pull_request.description = description
        pull_request.updated_on = datetime.datetime.now()
        Session().add(pull_request)
        self._log_audit_action(
            'repo.pull_request.edit', {'old_data': old_data},
            user, pull_request)
995 996
    def update_reviewers(self, pull_request, reviewer_data, user):
        """
        Update the reviewers in the pull request

        :param pull_request: the pr to update
        :param reviewer_data: list of tuples
            [(user, ['reason1', 'reason2'], mandatory_flag, [rules])]
        :param user: acting user, recorded in the audit log
        :return: tuple of (ids_to_add, ids_to_remove) reviewer user ids
        :raises ValueError: when the pull request is already closed
        """
        pull_request = self.__get_pull_request(pull_request)
        if pull_request.is_closed():
            raise ValueError('This pull request is closed')

        # normalize input into {user_id: {'reasons': ..., 'mandatory': ...}}
        reviewers = {}
        for user_id, reasons, mandatory, rules in reviewer_data:
            if isinstance(user_id, (int, basestring)):
                user_id = self._get_user(user_id).user_id
            reviewers[user_id] = {
                'reasons': reasons, 'mandatory': mandatory}

        reviewers_ids = set(reviewers.keys())
        current_reviewers = PullRequestReviewers.query()\
            .filter(PullRequestReviewers.pull_request ==
                    pull_request).all()
        current_reviewers_ids = set([x.user.user_id for x in current_reviewers])

        ids_to_add = reviewers_ids.difference(current_reviewers_ids)
        ids_to_remove = current_reviewers_ids.difference(reviewers_ids)

        log.debug("Adding %s reviewers", ids_to_add)
        log.debug("Removing %s reviewers", ids_to_remove)
        changed = False
        for uid in ids_to_add:
            changed = True
            _usr = self._get_user(uid)
            reviewer = PullRequestReviewers()
            reviewer.user = _usr
            reviewer.pull_request = pull_request
            reviewer.reasons = reviewers[uid]['reasons']
            # NOTE(marcink): mandatory shouldn't be changed now
            # reviewer.mandatory = reviewers[uid]['reasons']
            Session().add(reviewer)
            self._log_audit_action(
                'repo.pull_request.reviewer.add', {'data': reviewer.get_dict()},
                user, pull_request)

        for uid in ids_to_remove:
            changed = True
            # NOTE(review): this rebinds the `reviewers` dict from above;
            # harmless since the dict is not used afterwards, but a
            # distinct name would be clearer on the next touch.
            reviewers = PullRequestReviewers.query()\
                .filter(PullRequestReviewers.user_id == uid,
                        PullRequestReviewers.pull_request == pull_request)\
                .all()
            # use .all() in case we accidentally added the same person twice
            # this CAN happen due to the lack of DB checks
            for obj in reviewers:
                old_data = obj.get_dict()
                Session().delete(obj)
                self._log_audit_action(
                    'repo.pull_request.reviewer.delete',
                    {'old_data': old_data}, user, pull_request)

        if changed:
            pull_request.updated_on = datetime.datetime.now()
            Session().add(pull_request)

        self.notify_reviewers(pull_request, ids_to_add)
        return ids_to_add, ids_to_remove
1062 1063
1063 1064 def get_url(self, pull_request, request=None, permalink=False):
1064 1065 if not request:
1065 1066 request = get_current_request()
1066 1067
1067 1068 if permalink:
1068 1069 return request.route_url(
1069 1070 'pull_requests_global',
1070 1071 pull_request_id=pull_request.pull_request_id,)
1071 1072 else:
1072 1073 return request.route_url('pullrequest_show',
1073 1074 repo_name=safe_str(pull_request.target_repo.repo_name),
1074 1075 pull_request_id=pull_request.pull_request_id,)
1075 1076
1076 1077 def get_shadow_clone_url(self, pull_request, request=None):
1077 1078 """
1078 1079 Returns qualified url pointing to the shadow repository. If this pull
1079 1080 request is closed there is no shadow repository and ``None`` will be
1080 1081 returned.
1081 1082 """
1082 1083 if pull_request.is_closed():
1083 1084 return None
1084 1085 else:
1085 1086 pr_url = urllib.unquote(self.get_url(pull_request, request=request))
1086 1087 return safe_unicode('{pr_url}/repository'.format(pr_url=pr_url))
1087 1088
    def notify_reviewers(self, pull_request, reviewers_ids):
        """
        Create in-app notifications (and emails) informing the given
        reviewers about the pull request. No-op for an empty id list.
        """
        # notification to reviewers
        if not reviewers_ids:
            return

        pull_request_obj = pull_request
        # get the current participants of this pull request
        recipients = reviewers_ids
        notification_type = EmailNotificationModel.TYPE_PULL_REQUEST

        pr_source_repo = pull_request_obj.source_repo
        pr_target_repo = pull_request_obj.target_repo

        pr_url = h.route_url('pullrequest_show',
                             repo_name=pr_target_repo.repo_name,
                             pull_request_id=pull_request_obj.pull_request_id,)

        # set some variables for email notification
        pr_target_repo_url = h.route_url(
            'repo_summary', repo_name=pr_target_repo.repo_name)

        pr_source_repo_url = h.route_url(
            'repo_summary', repo_name=pr_source_repo.repo_name)

        # pull request specifics
        pull_request_commits = [
            (x.raw_id, x.message)
            for x in map(pr_source_repo.get_commit, pull_request.revisions)]

        kwargs = {
            'user': pull_request.author,
            'pull_request': pull_request_obj,
            'pull_request_commits': pull_request_commits,

            'pull_request_target_repo': pr_target_repo,
            'pull_request_target_repo_url': pr_target_repo_url,

            'pull_request_source_repo': pr_source_repo,
            'pull_request_source_repo_url': pr_source_repo_url,

            'pull_request_url': pr_url,
        }

        # pre-generate the subject for notification itself
        (subject,
         _h, _e,  # we don't care about those
         body_plaintext) = EmailNotificationModel().render_email(
            notification_type, **kwargs)

        # create notification objects, and emails
        NotificationModel().create(
            created_by=pull_request.author,
            notification_subject=subject,
            notification_body=body_plaintext,
            notification_type=notification_type,
            recipients=recipients,
            email_kwargs=kwargs,
        )
1146 1147
    def delete(self, pull_request, user):
        """
        Permanently delete a pull request, cleaning up its merge workspace
        and writing an audit entry beforehand.
        """
        pull_request = self.__get_pull_request(pull_request)
        old_data = pull_request.get_api_data(with_merge_state=False)
        self._cleanup_merge_workspace(pull_request)
        self._log_audit_action(
            'repo.pull_request.delete', {'old_data': old_data},
            user, pull_request)
        Session().delete(pull_request)
1155 1156
    def close_pull_request(self, pull_request, user):
        """
        Mark a pull request as closed, clean up its merge workspace and
        fire the 'close' hook plus an audit entry.
        """
        pull_request = self.__get_pull_request(pull_request)
        self._cleanup_merge_workspace(pull_request)
        pull_request.status = PullRequest.STATUS_CLOSED
        pull_request.updated_on = datetime.datetime.now()
        Session().add(pull_request)
        self._trigger_pull_request_hook(
            pull_request, pull_request.author, 'close')

        pr_data = pull_request.get_api_data(with_merge_state=False)
        self._log_audit_action(
            'repo.pull_request.close', {'data': pr_data}, user, pull_request)
1168 1169
    def close_pull_request_with_comment(
            self, pull_request, user, repo, message=None):
        """
        Close a pull request leaving a status-changing comment.

        The final status is APPROVED when the calculated review status is
        approved, REJECTED otherwise.

        :return: tuple of (comment, status)
        """
        pull_request_review_status = pull_request.calculated_review_status()

        if pull_request_review_status == ChangesetStatus.STATUS_APPROVED:
            # approved only if we have voting consent
            status = ChangesetStatus.STATUS_APPROVED
        else:
            status = ChangesetStatus.STATUS_REJECTED
        status_lbl = ChangesetStatus.get_status_lbl(status)

        default_message = (
            'Closing with status change {transition_icon} {status}.'
        ).format(transition_icon='>', status=status_lbl)
        text = message or default_message

        # create a comment, and link it to new status
        comment = CommentsModel().create(
            text=text,
            repo=repo.repo_id,
            user=user.user_id,
            pull_request=pull_request.pull_request_id,
            status_change=status_lbl,
            status_change_type=status,
            closing_pr=True
        )

        # calculate old status before we change it
        old_calculated_status = pull_request.calculated_review_status()
        ChangesetStatusModel().set_status(
            repo.repo_id,
            status,
            user.user_id,
            comment=comment,
            pull_request=pull_request.pull_request_id
        )

        Session().flush()
        events.trigger(events.PullRequestCommentEvent(pull_request, comment))
        # we now calculate the status of pull request again, and based on that
        # calculation trigger status change. This might happen in cases
        # that non-reviewer admin closes a pr, which means his vote doesn't
        # change the status, while if he's a reviewer this might change it.
        calculated_status = pull_request.calculated_review_status()
        if old_calculated_status != calculated_status:
            self._trigger_pull_request_hook(
                pull_request, user, 'review_status_change')

        # finally close the PR
        PullRequestModel().close_pull_request(
            pull_request.pull_request_id, user)

        return comment, status
1223 1224
    def merge_status(self, pull_request, translator=None,
                     force_shadow_repo_refresh=False):
        """
        Return (merge_possible, message) for the given pull request,
        checking server settings, closed state, repository requirements
        and finally the outcome of a dry-run merge.
        """
        _ = translator or get_current_request().translate

        if not self._is_merge_enabled(pull_request):
            return False, _('Server-side pull request merging is disabled.')
        if pull_request.is_closed():
            return False, _('This pull request is closed.')
        merge_possible, msg = self._check_repo_requirements(
            target=pull_request.target_repo, source=pull_request.source_repo,
            translator=_)
        if not merge_possible:
            return merge_possible, msg

        try:
            resp = self._try_merge(
                pull_request,
                force_shadow_repo_refresh=force_shadow_repo_refresh)
            log.debug("Merge response: %s", resp)
            status = resp.possible, self.merge_status_message(
                resp.failure_reason)
        except NotImplementedError:
            # backend without server-side merge support
            status = False, _('Pull request merging is not supported.')

        return status
1249 1250
1250 1251 def _check_repo_requirements(self, target, source, translator):
1251 1252 """
1252 1253 Check if `target` and `source` have compatible requirements.
1253 1254
1254 1255 Currently this is just checking for largefiles.
1255 1256 """
1256 1257 _ = translator
1257 1258 target_has_largefiles = self._has_largefiles(target)
1258 1259 source_has_largefiles = self._has_largefiles(source)
1259 1260 merge_possible = True
1260 1261 message = u''
1261 1262
1262 1263 if target_has_largefiles != source_has_largefiles:
1263 1264 merge_possible = False
1264 1265 if source_has_largefiles:
1265 1266 message = _(
1266 1267 'Target repository large files support is disabled.')
1267 1268 else:
1268 1269 message = _(
1269 1270 'Source repository large files support is disabled.')
1270 1271
1271 1272 return merge_possible, message
1272 1273
1273 1274 def _has_largefiles(self, repo):
1274 1275 largefiles_ui = VcsSettingsModel(repo=repo).get_ui_settings(
1275 1276 'extensions', 'largefiles')
1276 1277 return largefiles_ui and largefiles_ui[0].active
1277 1278
    def _try_merge(self, pull_request, force_shadow_repo_refresh=False):
        """
        Try to merge the pull request and return the merge status.

        Uses a cached merge state when neither the source nor the target
        reference has moved, unless a refresh is forced.
        """
        log.debug(
            "Trying out if the pull request %s can be merged. Force_refresh=%s",
            pull_request.pull_request_id, force_shadow_repo_refresh)
        target_vcs = pull_request.target_repo.scm_instance()

        # Refresh the target reference.
        try:
            target_ref = self._refresh_reference(
                pull_request.target_ref_parts, target_vcs)
        except CommitDoesNotExistError:
            merge_state = MergeResponse(
                False, False, None, MergeFailureReason.MISSING_TARGET_REF)
            return merge_state

        target_locked = pull_request.target_repo.locked
        if target_locked and target_locked[0]:
            log.debug("The target repository is locked.")
            merge_state = MergeResponse(
                False, False, None, MergeFailureReason.TARGET_IS_LOCKED)
        elif force_shadow_repo_refresh or self._needs_merge_state_refresh(
                pull_request, target_ref):
            log.debug("Refreshing the merge status of the repository.")
            merge_state = self._refresh_merge_state(
                pull_request, target_vcs, target_ref)
        else:
            # cached state is still valid; report it without re-running
            # the merge simulation
            possible = pull_request.\
                last_merge_status == MergeFailureReason.NONE
            merge_state = MergeResponse(
                possible, False, None, pull_request.last_merge_status)

        return merge_state
1313 1314
1314 1315 def _refresh_reference(self, reference, vcs_repository):
1315 1316 if reference.type in ('branch', 'book'):
1316 1317 name_or_id = reference.name
1317 1318 else:
1318 1319 name_or_id = reference.commit_id
1319 1320 refreshed_commit = vcs_repository.get_commit(name_or_id)
1320 1321 refreshed_reference = Reference(
1321 1322 reference.type, reference.name, refreshed_commit.raw_id)
1322 1323 return refreshed_reference
1323 1324
1324 1325 def _needs_merge_state_refresh(self, pull_request, target_reference):
1325 1326 return not(
1326 1327 pull_request.revisions and
1327 1328 pull_request.revisions[0] == pull_request._last_merge_source_rev and
1328 1329 target_reference.commit_id == pull_request._last_merge_target_rev)
1329 1330
    def _refresh_merge_state(self, pull_request, target_vcs, target_reference):
        """
        Run a dry-run merge to refresh the cached merge state of the pull
        request, persist the outcome (unless it failed for an unknown
        reason) and return the merge response.
        """
        workspace_id = self._workspace_id(pull_request)
        source_vcs = pull_request.source_repo.scm_instance()
        repo_id = pull_request.target_repo.repo_id
        use_rebase = self._use_rebase_for_merging(pull_request)
        close_branch = self._close_branch_before_merging(pull_request)
        merge_state = target_vcs.merge(
            repo_id, workspace_id,
            target_reference, source_vcs, pull_request.source_ref_parts,
            dry_run=True, use_rebase=use_rebase,
            close_branch=close_branch)

        # Do not store the response if there was an unknown error.
        if merge_state.failure_reason != MergeFailureReason.UNKNOWN:
            pull_request._last_merge_source_rev = \
                pull_request.source_ref_parts.commit_id
            pull_request._last_merge_target_rev = target_reference.commit_id
            pull_request.last_merge_status = merge_state.failure_reason
            pull_request.shadow_merge_ref = merge_state.merge_ref
            Session().add(pull_request)
            Session().commit()

        return merge_state
1353 1354
1354 1355 def _workspace_id(self, pull_request):
1355 1356 workspace_id = 'pr-%s' % pull_request.pull_request_id
1356 1357 return workspace_id
1357 1358
1358 1359 def merge_status_message(self, status_code):
1359 1360 """
1360 1361 Return a human friendly error message for the given merge status code.
1361 1362 """
1362 1363 return self.MERGE_STATUS_MESSAGES[status_code]
1363 1364
1364 1365 def generate_repo_data(self, repo, commit_id=None, branch=None,
1365 1366 bookmark=None, translator=None):
1366 1367 from rhodecode.model.repo import RepoModel
1367 1368
1368 1369 all_refs, selected_ref = \
1369 1370 self._get_repo_pullrequest_sources(
1370 1371 repo.scm_instance(), commit_id=commit_id,
1371 1372 branch=branch, bookmark=bookmark, translator=translator)
1372 1373
1373 1374 refs_select2 = []
1374 1375 for element in all_refs:
1375 1376 children = [{'id': x[0], 'text': x[1]} for x in element[0]]
1376 1377 refs_select2.append({'text': element[1], 'children': children})
1377 1378
1378 1379 return {
1379 1380 'user': {
1380 1381 'user_id': repo.user.user_id,
1381 1382 'username': repo.user.username,
1382 1383 'firstname': repo.user.first_name,
1383 1384 'lastname': repo.user.last_name,
1384 1385 'gravatar_link': h.gravatar_url(repo.user.email, 14),
1385 1386 },
1386 1387 'name': repo.repo_name,
1387 1388 'link': RepoModel().get_url(repo),
1388 1389 'description': h.chop_at_smart(repo.description_safe, '\n'),
1389 1390 'refs': {
1390 1391 'all_refs': all_refs,
1391 1392 'selected_ref': selected_ref,
1392 1393 'select2_refs': refs_select2
1393 1394 }
1394 1395 }
1395 1396
1396 1397 def generate_pullrequest_title(self, source, source_ref, target):
1397 1398 return u'{source}#{at_ref} to {target}'.format(
1398 1399 source=source,
1399 1400 at_ref=source_ref,
1400 1401 target=target,
1401 1402 )
1402 1403
1403 1404 def _cleanup_merge_workspace(self, pull_request):
1404 1405 # Merging related cleanup
1405 1406 repo_id = pull_request.target_repo.repo_id
1406 1407 target_scm = pull_request.target_repo.scm_instance()
1407 1408 workspace_id = self._workspace_id(pull_request)
1408 1409
1409 1410 try:
1410 1411 target_scm.cleanup_merge_workspace(repo_id, workspace_id)
1411 1412 except NotImplementedError:
1412 1413 pass
1413 1414
1414 1415 def _get_repo_pullrequest_sources(
1415 1416 self, repo, commit_id=None, branch=None, bookmark=None,
1416 1417 translator=None):
1417 1418 """
1418 1419 Return a structure with repo's interesting commits, suitable for
1419 1420 the selectors in pullrequest controller
1420 1421
1421 1422 :param commit_id: a commit that must be in the list somehow
1422 1423 and selected by default
1423 1424 :param branch: a branch that must be in the list and selected
1424 1425 by default - even if closed
1425 1426 :param bookmark: a bookmark that must be in the list and selected
1426 1427 """
1427 1428 _ = translator or get_current_request().translate
1428 1429
1429 1430 commit_id = safe_str(commit_id) if commit_id else None
1430 1431 branch = safe_str(branch) if branch else None
1431 1432 bookmark = safe_str(bookmark) if bookmark else None
1432 1433
1433 1434 selected = None
1434 1435
1435 1436 # order matters: first source that has commit_id in it will be selected
1436 1437 sources = []
1437 1438 sources.append(('book', repo.bookmarks.items(), _('Bookmarks'), bookmark))
1438 1439 sources.append(('branch', repo.branches.items(), _('Branches'), branch))
1439 1440
1440 1441 if commit_id:
1441 1442 ref_commit = (h.short_id(commit_id), commit_id)
1442 1443 sources.append(('rev', [ref_commit], _('Commit IDs'), commit_id))
1443 1444
1444 1445 sources.append(
1445 1446 ('branch', repo.branches_closed.items(), _('Closed Branches'), branch),
1446 1447 )
1447 1448
1448 1449 groups = []
1449 1450 for group_key, ref_list, group_name, match in sources:
1450 1451 group_refs = []
1451 1452 for ref_name, ref_id in ref_list:
1452 1453 ref_key = '%s:%s:%s' % (group_key, ref_name, ref_id)
1453 1454 group_refs.append((ref_key, ref_name))
1454 1455
1455 1456 if not selected:
1456 1457 if set([commit_id, match]) & set([ref_id, ref_name]):
1457 1458 selected = ref_key
1458 1459
1459 1460 if group_refs:
1460 1461 groups.append((group_refs, group_name))
1461 1462
1462 1463 if not selected:
1463 1464 ref = commit_id or branch or bookmark
1464 1465 if ref:
1465 1466 raise CommitDoesNotExistError(
1466 1467 'No commit refs could be found matching: %s' % ref)
1467 1468 elif repo.DEFAULT_BRANCH_NAME in repo.branches:
1468 1469 selected = 'branch:%s:%s' % (
1469 1470 repo.DEFAULT_BRANCH_NAME,
1470 1471 repo.branches[repo.DEFAULT_BRANCH_NAME]
1471 1472 )
1472 1473 elif repo.commit_ids:
1473 1474 # make the user select in this case
1474 1475 selected = None
1475 1476 else:
1476 1477 raise EmptyRepositoryError()
1477 1478 return groups, selected
1478 1479
1479 1480 def get_diff(self, source_repo, source_ref_id, target_ref_id, context=DIFF_CONTEXT):
1480 1481 return self._get_diff_from_pr_or_version(
1481 1482 source_repo, source_ref_id, target_ref_id, context=context)
1482 1483
1483 1484 def _get_diff_from_pr_or_version(
1484 1485 self, source_repo, source_ref_id, target_ref_id, context):
1485 1486 target_commit = source_repo.get_commit(
1486 1487 commit_id=safe_str(target_ref_id))
1487 1488 source_commit = source_repo.get_commit(
1488 1489 commit_id=safe_str(source_ref_id))
1489 1490 if isinstance(source_repo, Repository):
1490 1491 vcs_repo = source_repo.scm_instance()
1491 1492 else:
1492 1493 vcs_repo = source_repo
1493 1494
1494 1495 # TODO: johbo: In the context of an update, we cannot reach
1495 1496 # the old commit anymore with our normal mechanisms. It needs
1496 1497 # some sort of special support in the vcs layer to avoid this
1497 1498 # workaround.
1498 1499 if (source_commit.raw_id == vcs_repo.EMPTY_COMMIT_ID and
1499 1500 vcs_repo.alias == 'git'):
1500 1501 source_commit.raw_id = safe_str(source_ref_id)
1501 1502
1502 1503 log.debug('calculating diff between '
1503 1504 'source_ref:%s and target_ref:%s for repo `%s`',
1504 1505 target_ref_id, source_ref_id,
1505 1506 safe_unicode(vcs_repo.path))
1506 1507
1507 1508 vcs_diff = vcs_repo.get_diff(
1508 1509 commit1=target_commit, commit2=source_commit, context=context)
1509 1510 return vcs_diff
1510 1511
1511 1512 def _is_merge_enabled(self, pull_request):
1512 1513 return self._get_general_setting(
1513 1514 pull_request, 'rhodecode_pr_merge_enabled')
1514 1515
1515 1516 def _use_rebase_for_merging(self, pull_request):
1516 1517 repo_type = pull_request.target_repo.repo_type
1517 1518 if repo_type == 'hg':
1518 1519 return self._get_general_setting(
1519 1520 pull_request, 'rhodecode_hg_use_rebase_for_merging')
1520 1521 elif repo_type == 'git':
1521 1522 return self._get_general_setting(
1522 1523 pull_request, 'rhodecode_git_use_rebase_for_merging')
1523 1524
1524 1525 return False
1525 1526
1526 1527 def _close_branch_before_merging(self, pull_request):
1527 1528 repo_type = pull_request.target_repo.repo_type
1528 1529 if repo_type == 'hg':
1529 1530 return self._get_general_setting(
1530 1531 pull_request, 'rhodecode_hg_close_branch_before_merging')
1531 1532 elif repo_type == 'git':
1532 1533 return self._get_general_setting(
1533 1534 pull_request, 'rhodecode_git_close_branch_before_merging')
1534 1535
1535 1536 return False
1536 1537
1537 1538 def _get_general_setting(self, pull_request, settings_key, default=False):
1538 1539 settings_model = VcsSettingsModel(repo=pull_request.target_repo)
1539 1540 settings = settings_model.get_general_settings()
1540 1541 return settings.get(settings_key, default)
1541 1542
1542 1543 def _log_audit_action(self, action, action_data, user, pull_request):
1543 1544 audit_logger.store(
1544 1545 action=action,
1545 1546 action_data=action_data,
1546 1547 user=user,
1547 1548 repo=pull_request.target_repo)
1548 1549
1549 1550 def get_reviewer_functions(self):
1550 1551 """
1551 1552 Fetches functions for validation and fetching default reviewers.
1552 1553 If available we use the EE package, else we fallback to CE
1553 1554 package functions
1554 1555 """
1555 1556 try:
1556 1557 from rc_reviewers.utils import get_default_reviewers_data
1557 1558 from rc_reviewers.utils import validate_default_reviewers
1558 1559 except ImportError:
1559 1560 from rhodecode.apps.repository.utils import \
1560 1561 get_default_reviewers_data
1561 1562 from rhodecode.apps.repository.utils import \
1562 1563 validate_default_reviewers
1563 1564
1564 1565 return get_default_reviewers_data, validate_default_reviewers
1565 1566
1566 1567
1567 1568 class MergeCheck(object):
1568 1569 """
1569 1570 Perform Merge Checks and returns a check object which stores information
1570 1571 about merge errors, and merge conditions
1571 1572 """
1572 1573 TODO_CHECK = 'todo'
1573 1574 PERM_CHECK = 'perm'
1574 1575 REVIEW_CHECK = 'review'
1575 1576 MERGE_CHECK = 'merge'
1576 1577
1577 1578 def __init__(self):
1578 1579 self.review_status = None
1579 1580 self.merge_possible = None
1580 1581 self.merge_msg = ''
1581 1582 self.failed = None
1582 1583 self.errors = []
1583 1584 self.error_details = OrderedDict()
1584 1585
1585 1586 def push_error(self, error_type, message, error_key, details):
1586 1587 self.failed = True
1587 1588 self.errors.append([error_type, message])
1588 1589 self.error_details[error_key] = dict(
1589 1590 details=details,
1590 1591 error_type=error_type,
1591 1592 message=message
1592 1593 )
1593 1594
1594 1595 @classmethod
1595 1596 def validate(cls, pull_request, user, translator, fail_early=False,
1596 1597 force_shadow_repo_refresh=False):
1597 1598 _ = translator
1598 1599 merge_check = cls()
1599 1600
1600 1601 # permissions to merge
1601 1602 user_allowed_to_merge = PullRequestModel().check_user_merge(
1602 1603 pull_request, user)
1603 1604 if not user_allowed_to_merge:
1604 1605 log.debug("MergeCheck: cannot merge, approval is pending.")
1605 1606
1606 1607 msg = _('User `{}` not allowed to perform merge.').format(user.username)
1607 1608 merge_check.push_error('error', msg, cls.PERM_CHECK, user.username)
1608 1609 if fail_early:
1609 1610 return merge_check
1610 1611
1611 1612 # review status, must be always present
1612 1613 review_status = pull_request.calculated_review_status()
1613 1614 merge_check.review_status = review_status
1614 1615
1615 1616 status_approved = review_status == ChangesetStatus.STATUS_APPROVED
1616 1617 if not status_approved:
1617 1618 log.debug("MergeCheck: cannot merge, approval is pending.")
1618 1619
1619 1620 msg = _('Pull request reviewer approval is pending.')
1620 1621
1621 1622 merge_check.push_error(
1622 1623 'warning', msg, cls.REVIEW_CHECK, review_status)
1623 1624
1624 1625 if fail_early:
1625 1626 return merge_check
1626 1627
1627 1628 # left over TODOs
1628 1629 todos = CommentsModel().get_unresolved_todos(pull_request)
1629 1630 if todos:
1630 1631 log.debug("MergeCheck: cannot merge, {} "
1631 1632 "unresolved todos left.".format(len(todos)))
1632 1633
1633 1634 if len(todos) == 1:
1634 1635 msg = _('Cannot merge, {} TODO still not resolved.').format(
1635 1636 len(todos))
1636 1637 else:
1637 1638 msg = _('Cannot merge, {} TODOs still not resolved.').format(
1638 1639 len(todos))
1639 1640
1640 1641 merge_check.push_error('warning', msg, cls.TODO_CHECK, todos)
1641 1642
1642 1643 if fail_early:
1643 1644 return merge_check
1644 1645
1645 1646 # merge possible, here is the filesystem simulation + shadow repo
1646 1647 merge_status, msg = PullRequestModel().merge_status(
1647 1648 pull_request, translator=translator,
1648 1649 force_shadow_repo_refresh=force_shadow_repo_refresh)
1649 1650 merge_check.merge_possible = merge_status
1650 1651 merge_check.merge_msg = msg
1651 1652 if not merge_status:
1652 1653 log.debug(
1653 1654 "MergeCheck: cannot merge, pull request merge not possible.")
1654 1655 merge_check.push_error('warning', msg, cls.MERGE_CHECK, None)
1655 1656
1656 1657 if fail_early:
1657 1658 return merge_check
1658 1659
1659 1660 log.debug('MergeCheck: is failed: %s', merge_check.failed)
1660 1661 return merge_check
1661 1662
1662 1663 @classmethod
1663 1664 def get_merge_conditions(cls, pull_request, translator):
1664 1665 _ = translator
1665 1666 merge_details = {}
1666 1667
1667 1668 model = PullRequestModel()
1668 1669 use_rebase = model._use_rebase_for_merging(pull_request)
1669 1670
1670 1671 if use_rebase:
1671 1672 merge_details['merge_strategy'] = dict(
1672 1673 details={},
1673 1674 message=_('Merge strategy: rebase')
1674 1675 )
1675 1676 else:
1676 1677 merge_details['merge_strategy'] = dict(
1677 1678 details={},
1678 1679 message=_('Merge strategy: explicit merge commit')
1679 1680 )
1680 1681
1681 1682 close_branch = model._close_branch_before_merging(pull_request)
1682 1683 if close_branch:
1683 1684 repo_type = pull_request.target_repo.repo_type
1684 1685 if repo_type == 'hg':
1685 1686 close_msg = _('Source branch will be closed after merge.')
1686 1687 elif repo_type == 'git':
1687 1688 close_msg = _('Source branch will be deleted after merge.')
1688 1689
1689 1690 merge_details['close_branch'] = dict(
1690 1691 details={},
1691 1692 message=close_msg
1692 1693 )
1693 1694
1694 1695 return merge_details
1695 1696
1696 1697 ChangeTuple = collections.namedtuple(
1697 1698 'ChangeTuple', ['added', 'common', 'removed', 'total'])
1698 1699
1699 1700 FileChangeTuple = collections.namedtuple(
1700 1701 'FileChangeTuple', ['added', 'modified', 'removed'])
@@ -1,204 +1,205 b''
1 1 # -*- coding: utf-8 -*-
2 2
3 3 # Copyright (C) 2016-2018 RhodeCode GmbH
4 4 #
5 5 # This program is free software: you can redistribute it and/or modify
6 6 # it under the terms of the GNU Affero General Public License, version 3
7 7 # (only), as published by the Free Software Foundation.
8 8 #
9 9 # This program is distributed in the hope that it will be useful,
10 10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 12 # GNU General Public License for more details.
13 13 #
14 14 # You should have received a copy of the GNU Affero General Public License
15 15 # along with this program. If not, see <http://www.gnu.org/licenses/>.
16 16 #
17 17 # This program is dual-licensed. If you wish to learn more about the
18 18 # RhodeCode Enterprise Edition, including its added features, Support services,
19 19 # and proprietary license terms, please see https://rhodecode.com/licenses/
20 20
21 21
22 22 import pytest
23 23
24 24 from rhodecode.tests import no_newline_id_generator
25 25 from rhodecode.config.middleware import (
26 26 _sanitize_vcs_settings, _bool_setting, _string_setting, _list_setting,
27 27 _int_setting)
28 28
29 29
30 30 class TestHelperFunctions(object):
31 31 @pytest.mark.parametrize('raw, expected', [
32 32 ('true', True), (u'true', True),
33 33 ('yes', True), (u'yes', True),
34 34 ('on', True), (u'on', True),
35 35 ('false', False), (u'false', False),
36 36 ('no', False), (u'no', False),
37 37 ('off', False), (u'off', False),
38 38 ('invalid-bool-value', False),
39 39 ('invalid-∫øø@-√å@¨€', False),
40 40 (u'invalid-∫øø@-√å@¨€', False),
41 41 ])
42 42 def test_bool_setting_helper(self, raw, expected):
43 43 key = 'dummy-key'
44 44 settings = {key: raw}
45 45 _bool_setting(settings, key, None)
46 46 assert settings[key] is expected
47 47
48 48 @pytest.mark.parametrize('raw, expected', [
49 49 ('', ''),
50 50 ('test-string', 'test-string'),
51 51 ('CaSe-TeSt', 'case-test'),
52 52 ('test-string-烩€', 'test-string-烩€'),
53 53 (u'test-string-烩€', u'test-string-烩€'),
54 54 ])
55 55 def test_string_setting_helper(self, raw, expected):
56 56 key = 'dummy-key'
57 57 settings = {key: raw}
58 58 _string_setting(settings, key, None)
59 59 assert settings[key] == expected
60 60
61 61 @pytest.mark.parametrize('raw, expected', [
62 62 ('', []),
63 63 ('test', ['test']),
64 64 ('CaSe-TeSt', ['CaSe-TeSt']),
65 65 ('test-string-烩€', ['test-string-烩€']),
66 66 (u'test-string-烩€', [u'test-string-烩€']),
67 67 ('hg git svn', ['hg', 'git', 'svn']),
68 68 ('hg,git,svn', ['hg', 'git', 'svn']),
69 69 ('hg, git, svn', ['hg', 'git', 'svn']),
70 70 ('hg\ngit\nsvn', ['hg', 'git', 'svn']),
71 71 (' hg\n git\n svn ', ['hg', 'git', 'svn']),
72 72 (', hg , git , svn , ', ['', 'hg', 'git', 'svn', '']),
73 73 ('cheese,free node,other', ['cheese', 'free node', 'other']),
74 74 ], ids=no_newline_id_generator)
75 75 def test_list_setting_helper(self, raw, expected):
76 76 key = 'dummy-key'
77 77 settings = {key: raw}
78 78 _list_setting(settings, key, None)
79 79 assert settings[key] == expected
80 80
81 81 @pytest.mark.parametrize('raw, expected', [
82 82 ('0', 0),
83 83 ('-0', 0),
84 84 ('12345', 12345),
85 85 ('-12345', -12345),
86 86 (u'-12345', -12345),
87 87 ])
88 88 def test_int_setting_helper(self, raw, expected):
89 89 key = 'dummy-key'
90 90 settings = {key: raw}
91 91 _int_setting(settings, key, None)
92 92 assert settings[key] == expected
93 93
94 94 @pytest.mark.parametrize('raw', [
95 95 ('0xff'),
96 96 (''),
97 97 ('invalid-int'),
98 98 ('invalid-⁄~†'),
99 99 (u'invalid-⁄~†'),
100 100 ])
101 101 def test_int_setting_helper_invalid_input(self, raw):
102 102 key = 'dummy-key'
103 103 settings = {key: raw}
104 104 with pytest.raises(Exception):
105 105 _int_setting(settings, key, None)
106 106
107 107
108 108 class TestSanitizeVcsSettings(object):
109 109 _bool_settings = [
110 110 ('vcs.hooks.direct_calls', False),
111 111 ('vcs.server.enable', True),
112 112 ('vcs.start_server', False),
113 113 ('startup.import_repos', False),
114 114 ]
115 115
116 116 _string_settings = [
117 117 ('vcs.svn.compatible_version', ''),
118 118 ('git_rev_filter', '--all'),
119 119 ('vcs.hooks.protocol', 'http'),
120 ('vcs.hooks.host', '127.0.0.1'),
120 121 ('vcs.scm_app_implementation', 'http'),
121 122 ('vcs.server', ''),
122 123 ('vcs.server.log_level', 'debug'),
123 124 ('vcs.server.protocol', 'http'),
124 125 ]
125 126
126 127 _list_settings = [
127 128 ('vcs.backends', 'hg git'),
128 129 ]
129 130
130 131 @pytest.mark.parametrize('key, default', _list_settings)
131 132 def test_list_setting_spacesep_list(self, key, default):
132 133 test_list = ['test', 'list', 'values', 'for', key]
133 134 input_value = ' '.join(test_list)
134 135 settings = {key: input_value}
135 136 _sanitize_vcs_settings(settings)
136 137 assert settings[key] == test_list
137 138
138 139 @pytest.mark.parametrize('key, default', _list_settings)
139 140 def test_list_setting_newlinesep_list(self, key, default):
140 141 test_list = ['test', 'list', 'values', 'for', key]
141 142 input_value = '\n'.join(test_list)
142 143 settings = {key: input_value}
143 144 _sanitize_vcs_settings(settings)
144 145 assert settings[key] == test_list
145 146
146 147 @pytest.mark.parametrize('key, default', _list_settings)
147 148 def test_list_setting_commasep_list(self, key, default):
148 149 test_list = ['test', 'list', 'values', 'for', key]
149 150 input_value = ','.join(test_list)
150 151 settings = {key: input_value}
151 152 _sanitize_vcs_settings(settings)
152 153 assert settings[key] == test_list
153 154
154 155 @pytest.mark.parametrize('key, default', _list_settings)
155 156 def test_list_setting_comma_and_space_sep_list(self, key, default):
156 157 test_list = ['test', 'list', 'values', 'for', key]
157 158 input_value = ', '.join(test_list)
158 159 settings = {key: input_value}
159 160 _sanitize_vcs_settings(settings)
160 161 assert settings[key] == test_list
161 162
162 163 @pytest.mark.parametrize('key, default', _string_settings)
163 164 def test_string_setting_string(self, key, default):
164 165 test_value = 'test-string-for-{}'.format(key)
165 166 settings = {key: test_value}
166 167 _sanitize_vcs_settings(settings)
167 168 assert settings[key] == test_value
168 169
169 170 @pytest.mark.parametrize('key, default', _string_settings)
170 171 def test_string_setting_default(self, key, default):
171 172 settings = {}
172 173 _sanitize_vcs_settings(settings)
173 174 assert settings[key] == default
174 175
175 176 @pytest.mark.parametrize('key, default', _string_settings)
176 177 def test_string_setting_lowercase(self, key, default):
177 178 test_value = 'Test-String-For-{}'.format(key)
178 179 settings = {key: test_value}
179 180 _sanitize_vcs_settings(settings)
180 181 assert settings[key] == test_value.lower()
181 182
182 183 @pytest.mark.parametrize('key, default', _bool_settings)
183 184 def test_bool_setting_true(self, key, default):
184 185 settings = {key: 'true'}
185 186 _sanitize_vcs_settings(settings)
186 187 assert settings[key] is True
187 188
188 189 @pytest.mark.parametrize('key, default', _bool_settings)
189 190 def test_bool_setting_false(self, key, default):
190 191 settings = {key: 'false'}
191 192 _sanitize_vcs_settings(settings)
192 193 assert settings[key] is False
193 194
194 195 @pytest.mark.parametrize('key, default', _bool_settings)
195 196 def test_bool_setting_invalid_string(self, key, default):
196 197 settings = {key: 'no-bool-val-string'}
197 198 _sanitize_vcs_settings(settings)
198 199 assert settings[key] is False
199 200
200 201 @pytest.mark.parametrize('key, default', _bool_settings)
201 202 def test_bool_setting_default(self, key, default):
202 203 settings = {}
203 204 _sanitize_vcs_settings(settings)
204 205 assert settings[key] is default
@@ -1,472 +1,473 b''
1 1 # -*- coding: utf-8 -*-
2 2
3 3 # Copyright (C) 2010-2018 RhodeCode GmbH
4 4 #
5 5 # This program is free software: you can redistribute it and/or modify
6 6 # it under the terms of the GNU Affero General Public License, version 3
7 7 # (only), as published by the Free Software Foundation.
8 8 #
9 9 # This program is distributed in the hope that it will be useful,
10 10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 12 # GNU General Public License for more details.
13 13 #
14 14 # You should have received a copy of the GNU Affero General Public License
15 15 # along with this program. If not, see <http://www.gnu.org/licenses/>.
16 16 #
17 17 # This program is dual-licensed. If you wish to learn more about the
18 18 # RhodeCode Enterprise Edition, including its added features, Support services,
19 19 # and proprietary license terms, please see https://rhodecode.com/licenses/
20 20
21 21 import base64
22 22
23 23 import mock
24 24 import pytest
25 25
26 26 from rhodecode.lib.utils2 import AttributeDict
27 27 from rhodecode.tests.utils import CustomTestApp
28 28
29 29 from rhodecode.lib.caching_query import FromCache
30 30 from rhodecode.lib.hooks_daemon import DummyHooksCallbackDaemon
31 31 from rhodecode.lib.middleware import simplevcs
32 32 from rhodecode.lib.middleware.https_fixup import HttpsFixup
33 33 from rhodecode.lib.middleware.utils import scm_app_http
34 34 from rhodecode.model.db import User, _hash_key
35 35 from rhodecode.model.meta import Session
36 36 from rhodecode.tests import (
37 37 HG_REPO, TEST_USER_ADMIN_LOGIN, TEST_USER_ADMIN_PASS)
38 38 from rhodecode.tests.lib.middleware import mock_scm_app
39 39
40 40
41 41 class StubVCSController(simplevcs.SimpleVCS):
42 42
43 43 SCM = 'hg'
44 44 stub_response_body = tuple()
45 45
46 46 def __init__(self, *args, **kwargs):
47 47 super(StubVCSController, self).__init__(*args, **kwargs)
48 48 self._action = 'pull'
49 49 self._is_shadow_repo_dir = True
50 50 self._name = HG_REPO
51 51 self.set_repo_names(None)
52 52
53 53 @property
54 54 def is_shadow_repo_dir(self):
55 55 return self._is_shadow_repo_dir
56 56
57 57 def _get_repository_name(self, environ):
58 58 return self._name
59 59
60 60 def _get_action(self, environ):
61 61 return self._action
62 62
63 63 def _create_wsgi_app(self, repo_path, repo_name, config):
64 64 def fake_app(environ, start_response):
65 65 headers = [
66 66 ('Http-Accept', 'application/mercurial')
67 67 ]
68 68 start_response('200 OK', headers)
69 69 return self.stub_response_body
70 70 return fake_app
71 71
72 72 def _create_config(self, extras, repo_name):
73 73 return None
74 74
75 75
76 76 @pytest.fixture
77 77 def vcscontroller(baseapp, config_stub, request_stub):
78 78 config_stub.testing_securitypolicy()
79 79 config_stub.include('rhodecode.authentication')
80 80
81 81 controller = StubVCSController(
82 82 baseapp.config.get_settings(), request_stub.registry)
83 83 app = HttpsFixup(controller, baseapp.config.get_settings())
84 84 app = CustomTestApp(app)
85 85
86 86 _remove_default_user_from_query_cache()
87 87
88 88 # Sanity checks that things are set up correctly
89 89 app.get('/' + HG_REPO, status=200)
90 90
91 91 app.controller = controller
92 92 return app
93 93
94 94
95 95 def _remove_default_user_from_query_cache():
96 96 user = User.get_default_user(cache=True)
97 97 query = Session().query(User).filter(User.username == user.username)
98 98 query = query.options(
99 99 FromCache("sql_cache_short", "get_user_%s" % _hash_key(user.username)))
100 100 query.invalidate()
101 101 Session().expire(user)
102 102
103 103
104 104 def test_handles_exceptions_during_permissions_checks(
105 105 vcscontroller, disable_anonymous_user):
106 106 user_and_pass = '%s:%s' % (TEST_USER_ADMIN_LOGIN, TEST_USER_ADMIN_PASS)
107 107 auth_password = base64.encodestring(user_and_pass).strip()
108 108 extra_environ = {
109 109 'AUTH_TYPE': 'Basic',
110 110 'HTTP_AUTHORIZATION': 'Basic %s' % auth_password,
111 111 'REMOTE_USER': TEST_USER_ADMIN_LOGIN,
112 112 }
113 113
114 114 # Verify that things are hooked up correctly
115 115 vcscontroller.get('/', status=200, extra_environ=extra_environ)
116 116
117 117 # Simulate trouble during permission checks
118 118 with mock.patch('rhodecode.model.db.User.get_by_username',
119 119 side_effect=Exception) as get_user:
120 120 # Verify that a correct 500 is returned and check that the expected
121 121 # code path was hit.
122 122 vcscontroller.get('/', status=500, extra_environ=extra_environ)
123 123 assert get_user.called
124 124
125 125
126 126 def test_returns_forbidden_if_no_anonymous_access(
127 127 vcscontroller, disable_anonymous_user):
128 128 vcscontroller.get('/', status=401)
129 129
130 130
131 131 class StubFailVCSController(simplevcs.SimpleVCS):
132 132 def _handle_request(self, environ, start_response):
133 133 raise Exception("BOOM")
134 134
135 135
136 136 @pytest.fixture(scope='module')
137 137 def fail_controller(baseapp):
138 138 controller = StubFailVCSController(
139 139 baseapp.config.get_settings(), baseapp.config)
140 140 controller = HttpsFixup(controller, baseapp.config.get_settings())
141 141 controller = CustomTestApp(controller)
142 142 return controller
143 143
144 144
145 145 def test_handles_exceptions_as_internal_server_error(fail_controller):
146 146 fail_controller.get('/', status=500)
147 147
148 148
149 149 def test_provides_traceback_for_appenlight(fail_controller):
150 150 response = fail_controller.get(
151 151 '/', status=500, extra_environ={'appenlight.client': 'fake'})
152 152 assert 'appenlight.__traceback' in response.request.environ
153 153
154 154
155 155 def test_provides_utils_scm_app_as_scm_app_by_default(baseapp, request_stub):
156 156 controller = StubVCSController(baseapp.config.get_settings(), request_stub.registry)
157 157 assert controller.scm_app is scm_app_http
158 158
159 159
160 160 def test_allows_to_override_scm_app_via_config(baseapp, request_stub):
161 161 config = baseapp.config.get_settings().copy()
162 162 config['vcs.scm_app_implementation'] = (
163 163 'rhodecode.tests.lib.middleware.mock_scm_app')
164 164 controller = StubVCSController(config, request_stub.registry)
165 165 assert controller.scm_app is mock_scm_app
166 166
167 167
168 168 @pytest.mark.parametrize('query_string, expected', [
169 169 ('cmd=stub_command', True),
170 170 ('cmd=listkeys', False),
171 171 ])
172 172 def test_should_check_locking(query_string, expected):
173 173 result = simplevcs._should_check_locking(query_string)
174 174 assert result == expected
175 175
176 176
177 177 class TestShadowRepoRegularExpression(object):
178 178 pr_segment = 'pull-request'
179 179 shadow_segment = 'repository'
180 180
181 181 @pytest.mark.parametrize('url, expected', [
182 182 # repo with/without groups
183 183 ('My-Repo/{pr_segment}/1/{shadow_segment}', True),
184 184 ('Group/My-Repo/{pr_segment}/2/{shadow_segment}', True),
185 185 ('Group/Sub-Group/My-Repo/{pr_segment}/3/{shadow_segment}', True),
186 186 ('Group/Sub-Group1/Sub-Group2/My-Repo/{pr_segment}/3/{shadow_segment}', True),
187 187
188 188 # pull request ID
189 189 ('MyRepo/{pr_segment}/1/{shadow_segment}', True),
190 190 ('MyRepo/{pr_segment}/1234567890/{shadow_segment}', True),
191 191 ('MyRepo/{pr_segment}/-1/{shadow_segment}', False),
192 192 ('MyRepo/{pr_segment}/invalid/{shadow_segment}', False),
193 193
194 194 # unicode
195 195 (u'Sp€çîál-Repö/{pr_segment}/1/{shadow_segment}', True),
196 196 (u'Sp€çîál-Gröüp/Sp€çîál-Repö/{pr_segment}/1/{shadow_segment}', True),
197 197
198 198 # trailing/leading slash
199 199 ('/My-Repo/{pr_segment}/1/{shadow_segment}', False),
200 200 ('My-Repo/{pr_segment}/1/{shadow_segment}/', False),
201 201 ('/My-Repo/{pr_segment}/1/{shadow_segment}/', False),
202 202
203 203 # misc
204 204 ('My-Repo/{pr_segment}/1/{shadow_segment}/extra', False),
205 205 ('My-Repo/{pr_segment}/1/{shadow_segment}extra', False),
206 206 ])
207 207 def test_shadow_repo_regular_expression(self, url, expected):
208 208 from rhodecode.lib.middleware.simplevcs import SimpleVCS
209 209 url = url.format(
210 210 pr_segment=self.pr_segment,
211 211 shadow_segment=self.shadow_segment)
212 212 match_obj = SimpleVCS.shadow_repo_re.match(url)
213 213 assert (match_obj is not None) == expected
214 214
215 215
216 216 @pytest.mark.backends('git', 'hg')
217 217 class TestShadowRepoExposure(object):
218 218
219 219 def test_pull_on_shadow_repo_propagates_to_wsgi_app(
220 220 self, baseapp, request_stub):
221 221 """
222 222 Check that a pull action to a shadow repo is propagated to the
223 223 underlying wsgi app.
224 224 """
225 225 controller = StubVCSController(
226 226 baseapp.config.get_settings(), request_stub.registry)
227 227 controller._check_ssl = mock.Mock()
228 228 controller.is_shadow_repo = True
229 229 controller._action = 'pull'
230 230 controller._is_shadow_repo_dir = True
231 231 controller.stub_response_body = 'dummy body value'
232 232 controller._get_default_cache_ttl = mock.Mock(
233 233 return_value=(False, 0))
234 234
235 235 environ_stub = {
236 236 'HTTP_HOST': 'test.example.com',
237 237 'HTTP_ACCEPT': 'application/mercurial',
238 238 'REQUEST_METHOD': 'GET',
239 239 'wsgi.url_scheme': 'http',
240 240 }
241 241
242 242 response = controller(environ_stub, mock.Mock())
243 243 response_body = ''.join(response)
244 244
245 245 # Assert that we got the response from the wsgi app.
246 246 assert response_body == controller.stub_response_body
247 247
248 248 def test_pull_on_shadow_repo_that_is_missing(self, baseapp, request_stub):
249 249 """
250 250 Check that a pull action to a shadow repo is propagated to the
251 251 underlying wsgi app.
252 252 """
253 253 controller = StubVCSController(
254 254 baseapp.config.get_settings(), request_stub.registry)
255 255 controller._check_ssl = mock.Mock()
256 256 controller.is_shadow_repo = True
257 257 controller._action = 'pull'
258 258 controller._is_shadow_repo_dir = False
259 259 controller.stub_response_body = 'dummy body value'
260 260 environ_stub = {
261 261 'HTTP_HOST': 'test.example.com',
262 262 'HTTP_ACCEPT': 'application/mercurial',
263 263 'REQUEST_METHOD': 'GET',
264 264 'wsgi.url_scheme': 'http',
265 265 }
266 266
267 267 response = controller(environ_stub, mock.Mock())
268 268 response_body = ''.join(response)
269 269
270 270 # Assert that we got the response from the wsgi app.
271 271 assert '404 Not Found' in response_body
272 272
273 273 def test_push_on_shadow_repo_raises(self, baseapp, request_stub):
274 274 """
275 275 Check that a push action to a shadow repo is aborted.
276 276 """
277 277 controller = StubVCSController(
278 278 baseapp.config.get_settings(), request_stub.registry)
279 279 controller._check_ssl = mock.Mock()
280 280 controller.is_shadow_repo = True
281 281 controller._action = 'push'
282 282 controller.stub_response_body = 'dummy body value'
283 283 environ_stub = {
284 284 'HTTP_HOST': 'test.example.com',
285 285 'HTTP_ACCEPT': 'application/mercurial',
286 286 'REQUEST_METHOD': 'GET',
287 287 'wsgi.url_scheme': 'http',
288 288 }
289 289
290 290 response = controller(environ_stub, mock.Mock())
291 291 response_body = ''.join(response)
292 292
293 293 assert response_body != controller.stub_response_body
294 294 # Assert that a 406 error is returned.
295 295 assert '406 Not Acceptable' in response_body
296 296
297 297 def test_set_repo_names_no_shadow(self, baseapp, request_stub):
298 298 """
299 299 Check that the set_repo_names method sets all names to the one returned
300 300 by the _get_repository_name method on a request to a non shadow repo.
301 301 """
302 302 environ_stub = {}
303 303 controller = StubVCSController(
304 304 baseapp.config.get_settings(), request_stub.registry)
305 305 controller._name = 'RepoGroup/MyRepo'
306 306 controller.set_repo_names(environ_stub)
307 307 assert not controller.is_shadow_repo
308 308 assert (controller.url_repo_name ==
309 309 controller.acl_repo_name ==
310 310 controller.vcs_repo_name ==
311 311 controller._get_repository_name(environ_stub))
312 312
313 313 def test_set_repo_names_with_shadow(
314 314 self, baseapp, pr_util, config_stub, request_stub):
315 315 """
316 316 Check that the set_repo_names method sets correct names on a request
317 317 to a shadow repo.
318 318 """
319 319 from rhodecode.model.pull_request import PullRequestModel
320 320
321 321 pull_request = pr_util.create_pull_request()
322 322 shadow_url = '{target}/{pr_segment}/{pr_id}/{shadow_segment}'.format(
323 323 target=pull_request.target_repo.repo_name,
324 324 pr_id=pull_request.pull_request_id,
325 325 pr_segment=TestShadowRepoRegularExpression.pr_segment,
326 326 shadow_segment=TestShadowRepoRegularExpression.shadow_segment)
327 327 controller = StubVCSController(
328 328 baseapp.config.get_settings(), request_stub.registry)
329 329 controller._name = shadow_url
330 330 controller.set_repo_names({})
331 331
332 332 # Get file system path to shadow repo for assertions.
333 333 workspace_id = PullRequestModel()._workspace_id(pull_request)
334 334 target_vcs = pull_request.target_repo.scm_instance()
335 335 vcs_repo_name = target_vcs._get_shadow_repository_path(
336 336 pull_request.target_repo.repo_id, workspace_id)
337 337
338 338 assert controller.vcs_repo_name == vcs_repo_name
339 339 assert controller.url_repo_name == shadow_url
340 340 assert controller.acl_repo_name == pull_request.target_repo.repo_name
341 341 assert controller.is_shadow_repo
342 342
343 343 def test_set_repo_names_with_shadow_but_missing_pr(
344 344 self, baseapp, pr_util, config_stub, request_stub):
345 345 """
346 346 Checks that the set_repo_names method enforces matching target repos
347 347 and pull request IDs.
348 348 """
349 349 pull_request = pr_util.create_pull_request()
350 350 shadow_url = '{target}/{pr_segment}/{pr_id}/{shadow_segment}'.format(
351 351 target=pull_request.target_repo.repo_name,
352 352 pr_id=999999999,
353 353 pr_segment=TestShadowRepoRegularExpression.pr_segment,
354 354 shadow_segment=TestShadowRepoRegularExpression.shadow_segment)
355 355 controller = StubVCSController(
356 356 baseapp.config.get_settings(), request_stub.registry)
357 357 controller._name = shadow_url
358 358 controller.set_repo_names({})
359 359
360 360 assert not controller.is_shadow_repo
361 361 assert (controller.url_repo_name ==
362 362 controller.acl_repo_name ==
363 363 controller.vcs_repo_name)
364 364
365 365
366 366 @pytest.mark.usefixtures('baseapp')
367 367 class TestGenerateVcsResponse(object):
368 368
369 369 def test_ensures_that_start_response_is_called_early_enough(self):
370 370 self.call_controller_with_response_body(iter(['a', 'b']))
371 371 assert self.start_response.called
372 372
373 373 def test_invalidates_cache_after_body_is_consumed(self):
374 374 result = self.call_controller_with_response_body(iter(['a', 'b']))
375 375 assert not self.was_cache_invalidated()
376 376 # Consume the result
377 377 list(result)
378 378 assert self.was_cache_invalidated()
379 379
380 380 def test_raises_unknown_exceptions(self):
381 381 result = self.call_controller_with_response_body(
382 382 self.raise_result_iter(vcs_kind='unknown'))
383 383 with pytest.raises(Exception):
384 384 list(result)
385 385
386 386 def test_prepare_callback_daemon_is_called(self):
387 387 def side_effect(extras, environ, action, txn_id=None):
388 388 return DummyHooksCallbackDaemon(), extras
389 389
390 390 prepare_patcher = mock.patch.object(
391 391 StubVCSController, '_prepare_callback_daemon')
392 392 with prepare_patcher as prepare_mock:
393 393 prepare_mock.side_effect = side_effect
394 394 self.call_controller_with_response_body(iter(['a', 'b']))
395 395 assert prepare_mock.called
396 396 assert prepare_mock.call_count == 1
397 397
398 398 def call_controller_with_response_body(self, response_body):
399 399 settings = {
400 400 'base_path': 'fake_base_path',
401 401 'vcs.hooks.protocol': 'http',
402 402 'vcs.hooks.direct_calls': False,
403 403 }
404 404 registry = AttributeDict()
405 405 controller = StubVCSController(settings, registry)
406 406 controller._invalidate_cache = mock.Mock()
407 407 controller.stub_response_body = response_body
408 408 self.start_response = mock.Mock()
409 409 result = controller._generate_vcs_response(
410 410 environ={}, start_response=self.start_response,
411 411 repo_path='fake_repo_path',
412 412 extras={}, action='push')
413 413 self.controller = controller
414 414 return result
415 415
416 416 def raise_result_iter(self, vcs_kind='repo_locked'):
417 417 """
418 418 Simulates an exception due to a vcs raised exception if kind vcs_kind
419 419 """
420 420 raise self.vcs_exception(vcs_kind=vcs_kind)
421 421 yield "never_reached"
422 422
423 423 def vcs_exception(self, vcs_kind='repo_locked'):
424 424 locked_exception = Exception('TEST_MESSAGE')
425 425 locked_exception._vcs_kind = vcs_kind
426 426 return locked_exception
427 427
428 428 def was_cache_invalidated(self):
429 429 return self.controller._invalidate_cache.called
430 430
431 431
432 432 class TestInitializeGenerator(object):
433 433
434 434 def test_drains_first_element(self):
435 435 gen = self.factory(['__init__', 1, 2])
436 436 result = list(gen)
437 437 assert result == [1, 2]
438 438
439 439 @pytest.mark.parametrize('values', [
440 440 [],
441 441 [1, 2],
442 442 ])
443 443 def test_raises_value_error(self, values):
444 444 with pytest.raises(ValueError):
445 445 self.factory(values)
446 446
447 447 @simplevcs.initialize_generator
448 448 def factory(self, iterable):
449 449 for elem in iterable:
450 450 yield elem
451 451
452 452
453 453 class TestPrepareHooksDaemon(object):
454 454 def test_calls_imported_prepare_callback_daemon(self, app_settings, request_stub):
455 455 expected_extras = {'extra1': 'value1'}
456 456 daemon = DummyHooksCallbackDaemon()
457 457
458 458 controller = StubVCSController(app_settings, request_stub.registry)
459 459 prepare_patcher = mock.patch.object(
460 460 simplevcs, 'prepare_callback_daemon',
461 461 return_value=(daemon, expected_extras))
462 462 with prepare_patcher as prepare_mock:
463 463 callback_daemon, extras = controller._prepare_callback_daemon(
464 464 expected_extras.copy(), {}, 'push')
465 465 prepare_mock.assert_called_once_with(
466 466 expected_extras,
467 467 protocol=app_settings['vcs.hooks.protocol'],
468 host=app_settings['vcs.hooks.host'],
468 469 txn_id=None,
469 470 use_direct_calls=app_settings['vcs.hooks.direct_calls'])
470 471
471 472 assert callback_daemon == daemon
472 473 assert extras == extras
@@ -1,329 +1,331 b''
1 1 # -*- coding: utf-8 -*-
2 2
3 3 # Copyright (C) 2010-2018 RhodeCode GmbH
4 4 #
5 5 # This program is free software: you can redistribute it and/or modify
6 6 # it under the terms of the GNU Affero General Public License, version 3
7 7 # (only), as published by the Free Software Foundation.
8 8 #
9 9 # This program is distributed in the hope that it will be useful,
10 10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 12 # GNU General Public License for more details.
13 13 #
14 14 # You should have received a copy of the GNU Affero General Public License
15 15 # along with this program. If not, see <http://www.gnu.org/licenses/>.
16 16 #
17 17 # This program is dual-licensed. If you wish to learn more about the
18 18 # RhodeCode Enterprise Edition, including its added features, Support services,
19 19 # and proprietary license terms, please see https://rhodecode.com/licenses/
20 20
21 21 import json
22 22 import logging
23 23 from StringIO import StringIO
24 24
25 25 import mock
26 26 import pytest
27 27
28 28 from rhodecode.lib import hooks_daemon
29 29 from rhodecode.tests.utils import assert_message_in_log
30 30
31 31
32 32 class TestDummyHooksCallbackDaemon(object):
33 33 def test_hooks_module_path_set_properly(self):
34 34 daemon = hooks_daemon.DummyHooksCallbackDaemon()
35 35 assert daemon.hooks_module == 'rhodecode.lib.hooks_daemon'
36 36
37 37 def test_logs_entering_the_hook(self):
38 38 daemon = hooks_daemon.DummyHooksCallbackDaemon()
39 39 with mock.patch.object(hooks_daemon.log, 'debug') as log_mock:
40 40 with daemon as return_value:
41 41 log_mock.assert_called_once_with(
42 42 'Running dummy hooks callback daemon')
43 43 assert return_value == daemon
44 44
45 45 def test_logs_exiting_the_hook(self):
46 46 daemon = hooks_daemon.DummyHooksCallbackDaemon()
47 47 with mock.patch.object(hooks_daemon.log, 'debug') as log_mock:
48 48 with daemon:
49 49 pass
50 50 log_mock.assert_called_with('Exiting dummy hooks callback daemon')
51 51
52 52
53 53 class TestHooks(object):
54 54 def test_hooks_can_be_used_as_a_context_processor(self):
55 55 hooks = hooks_daemon.Hooks()
56 56 with hooks as return_value:
57 57 pass
58 58 assert hooks == return_value
59 59
60 60
61 61 class TestHooksHttpHandler(object):
62 62 def test_read_request_parses_method_name_and_arguments(self):
63 63 data = {
64 64 'method': 'test',
65 65 'extras': {
66 66 'param1': 1,
67 67 'param2': 'a'
68 68 }
69 69 }
70 70 request = self._generate_post_request(data)
71 71 hooks_patcher = mock.patch.object(
72 72 hooks_daemon.Hooks, data['method'], create=True, return_value=1)
73 73
74 74 with hooks_patcher as hooks_mock:
75 75 MockServer(hooks_daemon.HooksHttpHandler, request)
76 76
77 77 hooks_mock.assert_called_once_with(data['extras'])
78 78
79 79 def test_hooks_serialized_result_is_returned(self):
80 80 request = self._generate_post_request({})
81 81 rpc_method = 'test'
82 82 hook_result = {
83 83 'first': 'one',
84 84 'second': 2
85 85 }
86 86 read_patcher = mock.patch.object(
87 87 hooks_daemon.HooksHttpHandler, '_read_request',
88 88 return_value=(rpc_method, {}))
89 89 hooks_patcher = mock.patch.object(
90 90 hooks_daemon.Hooks, rpc_method, create=True,
91 91 return_value=hook_result)
92 92
93 93 with read_patcher, hooks_patcher:
94 94 server = MockServer(hooks_daemon.HooksHttpHandler, request)
95 95
96 96 expected_result = json.dumps(hook_result)
97 97 assert server.request.output_stream.buflist[-1] == expected_result
98 98
99 99 def test_exception_is_returned_in_response(self):
100 100 request = self._generate_post_request({})
101 101 rpc_method = 'test'
102 102 read_patcher = mock.patch.object(
103 103 hooks_daemon.HooksHttpHandler, '_read_request',
104 104 return_value=(rpc_method, {}))
105 105 hooks_patcher = mock.patch.object(
106 106 hooks_daemon.Hooks, rpc_method, create=True,
107 107 side_effect=Exception('Test exception'))
108 108
109 109 with read_patcher, hooks_patcher:
110 110 server = MockServer(hooks_daemon.HooksHttpHandler, request)
111 111
112 112 org_exc = json.loads(server.request.output_stream.buflist[-1])
113 113 expected_result = {
114 114 'exception': 'Exception',
115 115 'exception_traceback': org_exc['exception_traceback'],
116 116 'exception_args': ['Test exception']
117 117 }
118 118 assert org_exc == expected_result
119 119
120 120 def test_log_message_writes_to_debug_log(self, caplog):
121 121 ip_port = ('0.0.0.0', 8888)
122 122 handler = hooks_daemon.HooksHttpHandler(
123 123 MockRequest('POST /'), ip_port, mock.Mock())
124 124 fake_date = '1/Nov/2015 00:00:00'
125 125 date_patcher = mock.patch.object(
126 126 handler, 'log_date_time_string', return_value=fake_date)
127 127 with date_patcher, caplog.at_level(logging.DEBUG):
128 128 handler.log_message('Some message %d, %s', 123, 'string')
129 129
130 130 expected_message = '{} - - [{}] Some message 123, string'.format(
131 131 ip_port[0], fake_date)
132 132 assert_message_in_log(
133 133 caplog.records, expected_message,
134 134 levelno=logging.DEBUG, module='hooks_daemon')
135 135
136 136 def _generate_post_request(self, data):
137 137 payload = json.dumps(data)
138 138 return 'POST / HTTP/1.0\nContent-Length: {}\n\n{}'.format(
139 139 len(payload), payload)
140 140
141 141
142 142 class ThreadedHookCallbackDaemon(object):
143 143 def test_constructor_calls_prepare(self):
144 144 prepare_daemon_patcher = mock.patch.object(
145 145 hooks_daemon.ThreadedHookCallbackDaemon, '_prepare')
146 146 with prepare_daemon_patcher as prepare_daemon_mock:
147 147 hooks_daemon.ThreadedHookCallbackDaemon()
148 148 prepare_daemon_mock.assert_called_once_with()
149 149
150 150 def test_run_is_called_on_context_start(self):
151 151 patchers = mock.patch.multiple(
152 152 hooks_daemon.ThreadedHookCallbackDaemon,
153 153 _run=mock.DEFAULT, _prepare=mock.DEFAULT, __exit__=mock.DEFAULT)
154 154
155 155 with patchers as mocks:
156 156 daemon = hooks_daemon.ThreadedHookCallbackDaemon()
157 157 with daemon as daemon_context:
158 158 pass
159 159 mocks['_run'].assert_called_once_with()
160 160 assert daemon_context == daemon
161 161
162 162 def test_stop_is_called_on_context_exit(self):
163 163 patchers = mock.patch.multiple(
164 164 hooks_daemon.ThreadedHookCallbackDaemon,
165 165 _run=mock.DEFAULT, _prepare=mock.DEFAULT, _stop=mock.DEFAULT)
166 166
167 167 with patchers as mocks:
168 168 daemon = hooks_daemon.ThreadedHookCallbackDaemon()
169 169 with daemon as daemon_context:
170 170 assert mocks['_stop'].call_count == 0
171 171
172 172 mocks['_stop'].assert_called_once_with()
173 173 assert daemon_context == daemon
174 174
175 175
176 176 class TestHttpHooksCallbackDaemon(object):
177 177 def test_prepare_inits_daemon_variable(self, tcp_server, caplog):
178 178 with self._tcp_patcher(tcp_server), caplog.at_level(logging.DEBUG):
179 179 daemon = hooks_daemon.HttpHooksCallbackDaemon()
180 180 assert daemon._daemon == tcp_server
181 181
182 182 _, port = tcp_server.server_address
183 expected_uri = '{}:{}'.format(daemon.IP_ADDRESS, port)
183 expected_uri = '{}:{}'.format('127.0.0.1', port)
184 184 msg = 'Preparing HTTP callback daemon at `{}` and ' \
185 185 'registering hook object'.format(expected_uri)
186 186 assert_message_in_log(
187 187 caplog.records, msg, levelno=logging.DEBUG, module='hooks_daemon')
188 188
189 189 def test_prepare_inits_hooks_uri_and_logs_it(
190 190 self, tcp_server, caplog):
191 191 with self._tcp_patcher(tcp_server), caplog.at_level(logging.DEBUG):
192 192 daemon = hooks_daemon.HttpHooksCallbackDaemon()
193 193
194 194 _, port = tcp_server.server_address
195 expected_uri = '{}:{}'.format(daemon.IP_ADDRESS, port)
195 expected_uri = '{}:{}'.format('127.0.0.1', port)
196 196 assert daemon.hooks_uri == expected_uri
197 197
198 198 msg = 'Preparing HTTP callback daemon at `{}` and ' \
199 199 'registering hook object'.format(expected_uri)
200 200 assert_message_in_log(
201 201 caplog.records, msg,
202 202 levelno=logging.DEBUG, module='hooks_daemon')
203 203
204 204 def test_run_creates_a_thread(self, tcp_server):
205 205 thread = mock.Mock()
206 206
207 207 with self._tcp_patcher(tcp_server):
208 208 daemon = hooks_daemon.HttpHooksCallbackDaemon()
209 209
210 210 with self._thread_patcher(thread) as thread_mock:
211 211 daemon._run()
212 212
213 213 thread_mock.assert_called_once_with(
214 214 target=tcp_server.serve_forever,
215 215 kwargs={'poll_interval': daemon.POLL_INTERVAL})
216 216 assert thread.daemon is True
217 217 thread.start.assert_called_once_with()
218 218
219 219 def test_run_logs(self, tcp_server, caplog):
220 220
221 221 with self._tcp_patcher(tcp_server):
222 222 daemon = hooks_daemon.HttpHooksCallbackDaemon()
223 223
224 224 with self._thread_patcher(mock.Mock()), caplog.at_level(logging.DEBUG):
225 225 daemon._run()
226 226
227 227 assert_message_in_log(
228 228 caplog.records,
229 229 'Running event loop of callback daemon in background thread',
230 230 levelno=logging.DEBUG, module='hooks_daemon')
231 231
232 232 def test_stop_cleans_up_the_connection(self, tcp_server, caplog):
233 233 thread = mock.Mock()
234 234
235 235 with self._tcp_patcher(tcp_server):
236 236 daemon = hooks_daemon.HttpHooksCallbackDaemon()
237 237
238 238 with self._thread_patcher(thread), caplog.at_level(logging.DEBUG):
239 239 with daemon:
240 240 assert daemon._daemon == tcp_server
241 241 assert daemon._callback_thread == thread
242 242
243 243 assert daemon._daemon is None
244 244 assert daemon._callback_thread is None
245 245 tcp_server.shutdown.assert_called_with()
246 246 thread.join.assert_called_once_with()
247 247
248 248 assert_message_in_log(
249 249 caplog.records, 'Waiting for background thread to finish.',
250 250 levelno=logging.DEBUG, module='hooks_daemon')
251 251
252 252 def _tcp_patcher(self, tcp_server):
253 253 return mock.patch.object(
254 254 hooks_daemon, 'TCPServer', return_value=tcp_server)
255 255
256 256 def _thread_patcher(self, thread):
257 257 return mock.patch.object(
258 258 hooks_daemon.threading, 'Thread', return_value=thread)
259 259
260 260
261 261 class TestPrepareHooksDaemon(object):
262 262 @pytest.mark.parametrize('protocol', ('http',))
263 263 def test_returns_dummy_hooks_callback_daemon_when_using_direct_calls(
264 264 self, protocol):
265 265 expected_extras = {'extra1': 'value1'}
266 266 callback, extras = hooks_daemon.prepare_callback_daemon(
267 expected_extras.copy(), protocol=protocol, use_direct_calls=True)
267 expected_extras.copy(), protocol=protocol,
268 host='127.0.0.1', use_direct_calls=True)
268 269 assert isinstance(callback, hooks_daemon.DummyHooksCallbackDaemon)
269 270 expected_extras['hooks_module'] = 'rhodecode.lib.hooks_daemon'
270 271 expected_extras['time'] = extras['time']
271 272 assert 'extra1' in extras
272 273
273 274 @pytest.mark.parametrize('protocol, expected_class', (
274 275 ('http', hooks_daemon.HttpHooksCallbackDaemon),
275 276 ))
276 277 def test_returns_real_hooks_callback_daemon_when_protocol_is_specified(
277 278 self, protocol, expected_class):
278 279 expected_extras = {
279 280 'extra1': 'value1',
280 281 'txn_id': 'txnid2',
281 282 'hooks_protocol': protocol.lower()
282 283 }
283 284 callback, extras = hooks_daemon.prepare_callback_daemon(
284 expected_extras.copy(), protocol=protocol, use_direct_calls=False,
285 expected_extras.copy(), protocol=protocol, host='127.0.0.1',
286 use_direct_calls=False,
285 287 txn_id='txnid2')
286 288 assert isinstance(callback, expected_class)
287 289 extras.pop('hooks_uri')
288 290 expected_extras['time'] = extras['time']
289 291 assert extras == expected_extras
290 292
291 293 @pytest.mark.parametrize('protocol', (
292 294 'invalid',
293 295 'Http',
294 296 'HTTP',
295 297 ))
296 298 def test_raises_on_invalid_protocol(self, protocol):
297 299 expected_extras = {
298 300 'extra1': 'value1',
299 301 'hooks_protocol': protocol.lower()
300 302 }
301 303 with pytest.raises(Exception):
302 304 callback, extras = hooks_daemon.prepare_callback_daemon(
303 305 expected_extras.copy(),
304 protocol=protocol,
306 protocol=protocol, host='127.0.0.1',
305 307 use_direct_calls=False)
306 308
307 309
308 310 class MockRequest(object):
309 311 def __init__(self, request):
310 312 self.request = request
311 313 self.input_stream = StringIO(b'{}'.format(self.request))
312 314 self.output_stream = StringIO()
313 315
314 316 def makefile(self, mode, *args, **kwargs):
315 317 return self.output_stream if mode == 'wb' else self.input_stream
316 318
317 319
318 320 class MockServer(object):
319 321 def __init__(self, Handler, request):
320 322 ip_port = ('0.0.0.0', 8888)
321 323 self.request = MockRequest(request)
322 324 self.handler = Handler(self.request, ip_port, self)
323 325
324 326
325 327 @pytest.fixture
326 328 def tcp_server():
327 329 server = mock.Mock()
328 330 server.server_address = ('127.0.0.1', 8881)
329 331 return server
@@ -1,297 +1,298 b''
1 1 # -*- coding: utf-8 -*-
2 2
3 3 # Copyright (C) 2010-2018 RhodeCode GmbH
4 4 #
5 5 # This program is free software: you can redistribute it and/or modify
6 6 # it under the terms of the GNU Affero General Public License, version 3
7 7 # (only), as published by the Free Software Foundation.
8 8 #
9 9 # This program is distributed in the hope that it will be useful,
10 10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 12 # GNU General Public License for more details.
13 13 #
14 14 # You should have received a copy of the GNU Affero General Public License
15 15 # along with this program. If not, see <http://www.gnu.org/licenses/>.
16 16 #
17 17 # This program is dual-licensed. If you wish to learn more about the
18 18 # RhodeCode Enterprise Edition, including its added features, Support services,
19 19 # and proprietary license terms, please see https://rhodecode.com/licenses/
20 20
21 21 import json
22 22 import platform
23 23 import socket
24 24
25 25 import pytest
26 26
27 27 from rhodecode.lib.pyramid_utils import get_app_config
28 28 from rhodecode.tests.fixture import TestINI
29 29 from rhodecode.tests.server_utils import RcVCSServer
30 30
31 31
32 32 def _parse_json(value):
33 33 return json.loads(value) if value else None
34 34
35 35
36 36 def pytest_addoption(parser):
37 37 parser.addoption(
38 38 '--test-loglevel', dest='test_loglevel',
39 39 help="Set default Logging level for tests, warn (default), info, debug")
40 40 group = parser.getgroup('pylons')
41 41 group.addoption(
42 42 '--with-pylons', dest='pyramid_config',
43 43 help="Set up a Pylons environment with the specified config file.")
44 44 group.addoption(
45 45 '--ini-config-override', action='store', type=_parse_json,
46 46 default=None, dest='pyramid_config_override', help=(
47 47 "Overrides the .ini file settings. Should be specified in JSON"
48 48 " format, e.g. '{\"section\": {\"parameter\": \"value\", ...}}'"
49 49 )
50 50 )
51 51 parser.addini(
52 52 'pyramid_config',
53 53 "Set up a Pyramid environment with the specified config file.")
54 54
55 55 vcsgroup = parser.getgroup('vcs')
56 56 vcsgroup.addoption(
57 57 '--without-vcsserver', dest='with_vcsserver', action='store_false',
58 58 help="Do not start the VCSServer in a background process.")
59 59 vcsgroup.addoption(
60 60 '--with-vcsserver-http', dest='vcsserver_config_http',
61 61 help="Start the HTTP VCSServer with the specified config file.")
62 62 vcsgroup.addoption(
63 63 '--vcsserver-protocol', dest='vcsserver_protocol',
64 64 help="Start the VCSServer with HTTP protocol support.")
65 65 vcsgroup.addoption(
66 66 '--vcsserver-config-override', action='store', type=_parse_json,
67 67 default=None, dest='vcsserver_config_override', help=(
68 68 "Overrides the .ini file settings for the VCSServer. "
69 69 "Should be specified in JSON "
70 70 "format, e.g. '{\"section\": {\"parameter\": \"value\", ...}}'"
71 71 )
72 72 )
73 73 vcsgroup.addoption(
74 74 '--vcsserver-port', action='store', type=int,
75 75 default=None, help=(
76 76 "Allows to set the port of the vcsserver. Useful when testing "
77 77 "against an already running server and random ports cause "
78 78 "trouble."))
79 79 parser.addini(
80 80 'vcsserver_config_http',
81 81 "Start the HTTP VCSServer with the specified config file.")
82 82 parser.addini(
83 83 'vcsserver_protocol',
84 84 "Start the VCSServer with HTTP protocol support.")
85 85
86 86
87 87 @pytest.fixture(scope='session')
88 88 def vcsserver(request, vcsserver_port, vcsserver_factory):
89 89 """
90 90 Session scope VCSServer.
91 91
92 92 Tests wich need the VCSServer have to rely on this fixture in order
93 93 to ensure it will be running.
94 94
95 95 For specific needs, the fixture vcsserver_factory can be used. It allows to
96 96 adjust the configuration file for the test run.
97 97
98 98 Command line args:
99 99
100 100 --without-vcsserver: Allows to switch this fixture off. You have to
101 101 manually start the server.
102 102
103 103 --vcsserver-port: Will expect the VCSServer to listen on this port.
104 104 """
105 105
106 106 if not request.config.getoption('with_vcsserver'):
107 107 return None
108 108
109 109 return vcsserver_factory(
110 110 request, vcsserver_port=vcsserver_port)
111 111
112 112
113 113 @pytest.fixture(scope='session')
114 114 def vcsserver_factory(tmpdir_factory):
115 115 """
116 116 Use this if you need a running vcsserver with a special configuration.
117 117 """
118 118
119 119 def factory(request, overrides=(), vcsserver_port=None,
120 120 log_file=None):
121 121
122 122 if vcsserver_port is None:
123 123 vcsserver_port = get_available_port()
124 124
125 125 overrides = list(overrides)
126 126 overrides.append({'server:main': {'port': vcsserver_port}})
127 127
128 128 if is_cygwin():
129 129 platform_override = {'DEFAULT': {
130 130 'beaker.cache.repo_object.type': 'nocache'}}
131 131 overrides.append(platform_override)
132 132
133 133 option_name = 'vcsserver_config_http'
134 134 override_option_name = 'vcsserver_config_override'
135 135 config_file = get_config(
136 136 request.config, option_name=option_name,
137 137 override_option_name=override_option_name, overrides=overrides,
138 138 basetemp=tmpdir_factory.getbasetemp().strpath,
139 139 prefix='test_vcs_')
140 140
141 141 server = RcVCSServer(config_file, log_file)
142 142 server.start()
143 143
144 144 @request.addfinalizer
145 145 def cleanup():
146 146 server.shutdown()
147 147
148 148 server.wait_until_ready()
149 149 return server
150 150
151 151 return factory
152 152
153 153
154 154 def is_cygwin():
155 155 return 'cygwin' in platform.system().lower()
156 156
157 157
158 158 def _use_log_level(config):
159 159 level = config.getoption('test_loglevel') or 'warn'
160 160 return level.upper()
161 161
162 162
163 163 @pytest.fixture(scope='session')
164 164 def ini_config(request, tmpdir_factory, rcserver_port, vcsserver_port):
165 165 option_name = 'pyramid_config'
166 166 log_level = _use_log_level(request.config)
167 167
168 168 overrides = [
169 169 {'server:main': {'port': rcserver_port}},
170 170 {'app:main': {
171 171 'vcs.server': 'localhost:%s' % vcsserver_port,
172 172 # johbo: We will always start the VCSServer on our own based on the
173 173 # fixtures of the test cases. For the test run it must always be
174 174 # off in the INI file.
175 175 'vcs.start_server': 'false',
176 176
177 177 'vcs.server.protocol': 'http',
178 178 'vcs.scm_app_implementation': 'http',
179 179 'vcs.hooks.protocol': 'http',
180 'vcs.hooks.host': '127.0.0.1',
180 181 }},
181 182
182 183 {'handler_console': {
183 184 'class ': 'StreamHandler',
184 185 'args ': '(sys.stderr,)',
185 186 'level': log_level,
186 187 }},
187 188
188 189 ]
189 190
190 191 filename = get_config(
191 192 request.config, option_name=option_name,
192 193 override_option_name='{}_override'.format(option_name),
193 194 overrides=overrides,
194 195 basetemp=tmpdir_factory.getbasetemp().strpath,
195 196 prefix='test_rce_')
196 197 return filename
197 198
198 199
199 200 @pytest.fixture(scope='session')
200 201 def ini_settings(ini_config):
201 202 ini_path = ini_config
202 203 return get_app_config(ini_path)
203 204
204 205
205 206 def get_available_port():
206 207 family = socket.AF_INET
207 208 socktype = socket.SOCK_STREAM
208 209 host = '127.0.0.1'
209 210
210 211 mysocket = socket.socket(family, socktype)
211 212 mysocket.bind((host, 0))
212 213 port = mysocket.getsockname()[1]
213 214 mysocket.close()
214 215 del mysocket
215 216 return port
216 217
217 218
218 219 @pytest.fixture(scope='session')
219 220 def rcserver_port(request):
220 221 port = get_available_port()
221 222 print('Using rcserver port {}'.format(port))
222 223 return port
223 224
224 225
225 226 @pytest.fixture(scope='session')
226 227 def vcsserver_port(request):
227 228 port = request.config.getoption('--vcsserver-port')
228 229 if port is None:
229 230 port = get_available_port()
230 231 print('Using vcsserver port {}'.format(port))
231 232 return port
232 233
233 234
234 235 @pytest.fixture(scope='session')
235 236 def available_port_factory():
236 237 """
237 238 Returns a callable which returns free port numbers.
238 239 """
239 240 return get_available_port
240 241
241 242
242 243 @pytest.fixture
243 244 def available_port(available_port_factory):
244 245 """
245 246 Gives you one free port for the current test.
246 247
247 248 Uses "available_port_factory" to retrieve the port.
248 249 """
249 250 return available_port_factory()
250 251
251 252
252 253 @pytest.fixture(scope='session')
253 254 def testini_factory(tmpdir_factory, ini_config):
254 255 """
255 256 Factory to create an INI file based on TestINI.
256 257
257 258 It will make sure to place the INI file in the correct directory.
258 259 """
259 260 basetemp = tmpdir_factory.getbasetemp().strpath
260 261 return TestIniFactory(basetemp, ini_config)
261 262
262 263
263 264 class TestIniFactory(object):
264 265
265 266 def __init__(self, basetemp, template_ini):
266 267 self._basetemp = basetemp
267 268 self._template_ini = template_ini
268 269
269 270 def __call__(self, ini_params, new_file_prefix='test'):
270 271 ini_file = TestINI(
271 272 self._template_ini, ini_params=ini_params,
272 273 new_file_prefix=new_file_prefix, dir=self._basetemp)
273 274 result = ini_file.create()
274 275 return result
275 276
276 277
277 278 def get_config(
278 279 config, option_name, override_option_name, overrides=None,
279 280 basetemp=None, prefix='test'):
280 281 """
281 282 Find a configuration file and apply overrides for the given `prefix`.
282 283 """
283 284 config_file = (
284 285 config.getoption(option_name) or config.getini(option_name))
285 286 if not config_file:
286 287 pytest.exit(
287 288 "Configuration error, could not extract {}.".format(option_name))
288 289
289 290 overrides = overrides or []
290 291 config_override = config.getoption(override_option_name)
291 292 if config_override:
292 293 overrides.append(config_override)
293 294 temp_ini_file = TestINI(
294 295 config_file, ini_params=overrides, new_file_prefix=prefix,
295 296 dir=basetemp)
296 297
297 298 return temp_ini_file.create()
@@ -1,686 +1,687 b''
1 1
2 2
3 3 ################################################################################
4 4 ## RHODECODE COMMUNITY EDITION CONFIGURATION ##
5 5 # The %(here)s variable will be replaced with the parent directory of this file#
6 6 ################################################################################
7 7
8 8 [DEFAULT]
9 9 debug = true
10 10
11 11 ################################################################################
12 12 ## EMAIL CONFIGURATION ##
13 13 ## Uncomment and replace with the email address which should receive ##
14 14 ## any error reports after an application crash ##
15 15 ## Additionally these settings will be used by the RhodeCode mailing system ##
16 16 ################################################################################
17 17
18 18 ## prefix all emails subjects with given prefix, helps filtering out emails
19 19 #email_prefix = [RhodeCode]
20 20
21 21 ## email FROM address all mails will be sent
22 22 #app_email_from = rhodecode-noreply@localhost
23 23
24 24 ## Uncomment and replace with the address which should receive any error report
25 25 ## note: using appenlight for error handling doesn't need this to be uncommented
26 26 #email_to = admin@localhost
27 27
28 28 ## in case of Application errors, sent an error email form
29 29 #error_email_from = rhodecode_error@localhost
30 30
31 31 ## additional error message to be send in case of server crash
32 32 #error_message =
33 33
34 34
35 35 #smtp_server = mail.server.com
36 36 #smtp_username =
37 37 #smtp_password =
38 38 #smtp_port =
39 39 #smtp_use_tls = false
40 40 #smtp_use_ssl = true
41 41 ## Specify available auth parameters here (e.g. LOGIN PLAIN CRAM-MD5, etc.)
42 42 #smtp_auth =
43 43
44 44 [server:main]
45 45 ## COMMON ##
46 46 host = 0.0.0.0
47 47 port = 5000
48 48
49 49 ##########################
50 50 ## GUNICORN WSGI SERVER ##
51 51 ##########################
52 52 ## run with gunicorn --log-config rhodecode.ini --paste rhodecode.ini
53 53
54 54 use = egg:gunicorn#main
55 55 ## Sets the number of process workers. You must set `instance_id = *`
56 56 ## when this option is set to more than one worker, recommended
57 57 ## value is (2 * NUMBER_OF_CPUS + 1), eg 2CPU = 5 workers
58 58 ## The `instance_id = *` must be set in the [app:main] section below
59 59 #workers = 2
60 60 ## number of threads for each of the worker, must be set to 1 for gevent
61 61 ## generally recommened to be at 1
62 62 #threads = 1
63 63 ## process name
64 64 #proc_name = rhodecode
65 65 ## type of worker class, one of sync, gevent
66 66 ## recommended for bigger setup is using of of other than sync one
67 67 #worker_class = sync
68 68 ## The maximum number of simultaneous clients. Valid only for Gevent
69 69 #worker_connections = 10
70 70 ## max number of requests that worker will handle before being gracefully
71 71 ## restarted, could prevent memory leaks
72 72 #max_requests = 1000
73 73 #max_requests_jitter = 30
74 74 ## amount of time a worker can spend with handling a request before it
75 75 ## gets killed and restarted. Set to 6hrs
76 76 #timeout = 21600
77 77
78 78 ## prefix middleware for RhodeCode.
79 79 ## recommended when using proxy setup.
80 80 ## allows to set RhodeCode under a prefix in server.
81 81 ## eg https://server.com/custom_prefix. Enable `filter-with =` option below as well.
82 82 ## And set your prefix like: `prefix = /custom_prefix`
83 83 ## be sure to also set beaker.session.cookie_path = /custom_prefix if you need
84 84 ## to make your cookies only work on prefix url
85 85 [filter:proxy-prefix]
86 86 use = egg:PasteDeploy#prefix
87 87 prefix = /
88 88
89 89 [app:main]
90 90 is_test = True
91 91 use = egg:rhodecode-enterprise-ce
92 92
93 93 ## enable proxy prefix middleware, defined above
94 94 #filter-with = proxy-prefix
95 95
96 96
97 97 ## RHODECODE PLUGINS ##
98 98 rhodecode.includes = rhodecode.api
99 99
100 100 # api prefix url
101 101 rhodecode.api.url = /_admin/api
102 102
103 103
104 104 ## END RHODECODE PLUGINS ##
105 105
106 106 ## encryption key used to encrypt social plugin tokens,
107 107 ## remote_urls with credentials etc, if not set it defaults to
108 108 ## `beaker.session.secret`
109 109 #rhodecode.encrypted_values.secret =
110 110
111 111 ## decryption strict mode (enabled by default). It controls if decryption raises
112 112 ## `SignatureVerificationError` in case of wrong key, or damaged encryption data.
113 113 #rhodecode.encrypted_values.strict = false
114 114
115 115 ## return gzipped responses from Rhodecode (static files/application)
116 116 gzip_responses = false
117 117
118 118 ## autogenerate javascript routes file on startup
119 119 generate_js_files = false
120 120
121 121 ## Optional Languages
122 122 ## en(default), be, de, es, fr, it, ja, pl, pt, ru, zh
123 123 lang = en
124 124
125 125 ## perform a full repository scan on each server start, this should be
126 126 ## set to false after first startup, to allow faster server restarts.
127 127 startup.import_repos = true
128 128
129 129 ## Uncomment and set this path to use archive download cache.
130 130 ## Once enabled, generated archives will be cached at this location
131 131 ## and served from the cache during subsequent requests for the same archive of
132 132 ## the repository.
133 133 #archive_cache_dir = /tmp/tarballcache
134 134
135 135 ## URL at which the application is running. This is used for bootstraping
136 136 ## requests in context when no web request is available. Used in ishell, or
137 137 ## SSH calls. Set this for events to receive proper url for SSH calls.
138 138 app.base_url = http://rhodecode.local
139 139
140 140 ## change this to unique ID for security
141 141 app_instance_uuid = rc-production
142 142
143 143 ## cut off limit for large diffs (size in bytes)
144 144 cut_off_limit_diff = 1024000
145 145 cut_off_limit_file = 256000
146 146
147 147 ## use cache version of scm repo everywhere
148 148 vcs_full_cache = false
149 149
150 150 ## force https in RhodeCode, fixes https redirects, assumes it's always https
151 151 ## Normally this is controlled by proper http flags sent from http server
152 152 force_https = false
153 153
154 154 ## use Strict-Transport-Security headers
155 155 use_htsts = false
156 156
157 157 ## git rev filter option, --all is the default filter, if you need to
158 158 ## hide all refs in changelog switch this to --branches --tags
159 159 git_rev_filter = --all
160 160
161 161 # Set to true if your repos are exposed using the dumb protocol
162 162 git_update_server_info = false
163 163
164 164 ## RSS/ATOM feed options
165 165 rss_cut_off_limit = 256000
166 166 rss_items_per_page = 10
167 167 rss_include_diff = false
168 168
169 169 ## gist URL alias, used to create nicer urls for gist. This should be an
170 170 ## url that does rewrites to _admin/gists/{gistid}.
171 171 ## example: http://gist.rhodecode.org/{gistid}. Empty means use the internal
172 172 ## RhodeCode url, ie. http[s]://rhodecode.server/_admin/gists/{gistid}
173 173 gist_alias_url =
174 174
175 175 ## List of views (using glob pattern syntax) that AUTH TOKENS could be
176 176 ## used for access.
177 177 ## Adding ?auth_token=TOKEN_HASH to the url authenticates this request as if it
178 178 ## came from the the logged in user who own this authentication token.
179 179 ## Additionally @TOKEN syntaxt can be used to bound the view to specific
180 180 ## authentication token. Such view would be only accessible when used together
181 181 ## with this authentication token
182 182 ##
183 183 ## list of all views can be found under `/_admin/permissions/auth_token_access`
184 184 ## The list should be "," separated and on a single line.
185 185 ##
186 186 ## Most common views to enable:
187 187 # RepoCommitsView:repo_commit_download
188 188 # RepoCommitsView:repo_commit_patch
189 189 # RepoCommitsView:repo_commit_raw
190 190 # RepoCommitsView:repo_commit_raw@TOKEN
191 191 # RepoFilesView:repo_files_diff
192 192 # RepoFilesView:repo_archivefile
193 193 # RepoFilesView:repo_file_raw
194 194 # GistView:*
195 195 api_access_controllers_whitelist =
196 196
197 197 ## default encoding used to convert from and to unicode
198 198 ## can be also a comma separated list of encoding in case of mixed encodings
199 199 default_encoding = UTF-8
200 200
201 201 ## instance-id prefix
202 202 ## a prefix key for this instance used for cache invalidation when running
203 203 ## multiple instances of rhodecode, make sure it's globally unique for
204 204 ## all running rhodecode instances. Leave empty if you don't use it
205 205 instance_id =
206 206
207 207 ## Fallback authentication plugin. Set this to a plugin ID to force the usage
208 208 ## of an authentication plugin also if it is disabled by it's settings.
209 209 ## This could be useful if you are unable to log in to the system due to broken
210 210 ## authentication settings. Then you can enable e.g. the internal rhodecode auth
211 211 ## module to log in again and fix the settings.
212 212 ##
213 213 ## Available builtin plugin IDs (hash is part of the ID):
214 214 ## egg:rhodecode-enterprise-ce#rhodecode
215 215 ## egg:rhodecode-enterprise-ce#pam
216 216 ## egg:rhodecode-enterprise-ce#ldap
217 217 ## egg:rhodecode-enterprise-ce#jasig_cas
218 218 ## egg:rhodecode-enterprise-ce#headers
219 219 ## egg:rhodecode-enterprise-ce#crowd
220 220 #rhodecode.auth_plugin_fallback = egg:rhodecode-enterprise-ce#rhodecode
221 221
222 222 ## alternative return HTTP header for failed authentication. Default HTTP
223 223 ## response is 401 HTTPUnauthorized. Currently HG clients have troubles with
224 224 ## handling that causing a series of failed authentication calls.
225 225 ## Set this variable to 403 to return HTTPForbidden, or any other HTTP code
226 226 ## This will be served instead of default 401 on bad authnetication
227 227 auth_ret_code =
228 228
229 229 ## use special detection method when serving auth_ret_code, instead of serving
230 230 ## ret_code directly, use 401 initially (Which triggers credentials prompt)
231 231 ## and then serve auth_ret_code to clients
232 232 auth_ret_code_detection = false
233 233
234 234 ## locking return code. When repository is locked return this HTTP code. 2XX
235 235 ## codes don't break the transactions while 4XX codes do
236 236 lock_ret_code = 423
237 237
238 238 ## allows to change the repository location in settings page
239 239 allow_repo_location_change = true
240 240
241 241 ## allows to setup custom hooks in settings page
242 242 allow_custom_hooks_settings = true
243 243
244 244 ## generated license token, goto license page in RhodeCode settings to obtain
245 245 ## new token
246 246 license_token = abra-cada-bra1-rce3
247 247
248 248 ## supervisor connection uri, for managing supervisor and logs.
249 249 supervisor.uri =
250 250 ## supervisord group name/id we only want this RC instance to handle
251 251 supervisor.group_id = dev
252 252
253 253 ## Display extended labs settings
254 254 labs_settings_active = true
255 255
256 256 ####################################
257 257 ### CELERY CONFIG ####
258 258 ####################################
259 259 use_celery = false
260 260 broker.host = localhost
261 261 broker.vhost = rabbitmqhost
262 262 broker.port = 5672
263 263 broker.user = rabbitmq
264 264 broker.password = qweqwe
265 265
266 266 celery.imports = rhodecode.lib.celerylib.tasks
267 267
268 268 celery.result.backend = amqp
269 269 celery.result.dburi = amqp://
270 270 celery.result.serialier = json
271 271
272 272 #celery.send.task.error.emails = true
273 273 #celery.amqp.task.result.expires = 18000
274 274
275 275 celeryd.concurrency = 2
276 276 #celeryd.log.file = celeryd.log
277 277 celeryd.log.level = debug
278 278 celeryd.max.tasks.per.child = 1
279 279
280 280 ## tasks will never be sent to the queue, but executed locally instead.
281 281 celery.always.eager = false
282 282
283 283 ####################################
284 284 ### BEAKER CACHE ####
285 285 ####################################
286 286 # default cache dir for templates. Putting this into a ramdisk
287 287 ## can boost performance, eg. %(here)s/data_ramdisk
288 288 cache_dir = %(here)s/data
289 289
290 290 ## locking and default file storage for Beaker. Putting this into a ramdisk
291 291 ## can boost performance, eg. %(here)s/data_ramdisk/cache/beaker_data
292 292 beaker.cache.data_dir = %(here)s/rc/data/cache/beaker_data
293 293 beaker.cache.lock_dir = %(here)s/rc/data/cache/beaker_lock
294 294
295 295 beaker.cache.regions = short_term, long_term, sql_cache_short, auth_plugins, repo_cache_long
296 296
297 297 beaker.cache.short_term.type = file
298 298 beaker.cache.short_term.expire = 0
299 299 beaker.cache.short_term.key_length = 256
300 300
301 301 beaker.cache.long_term.type = memory
302 302 beaker.cache.long_term.expire = 36000
303 303 beaker.cache.long_term.key_length = 256
304 304
305 305 beaker.cache.sql_cache_short.type = memory
306 306 beaker.cache.sql_cache_short.expire = 1
307 307 beaker.cache.sql_cache_short.key_length = 256
308 308
309 309 ## default is memory cache, configure only if required
310 310 ## using multi-node or multi-worker setup
311 311 #beaker.cache.auth_plugins.type = memory
312 312 #beaker.cache.auth_plugins.lock_dir = %(here)s/data/cache/auth_plugin_lock
313 313 #beaker.cache.auth_plugins.url = postgresql://postgres:secret@localhost/rhodecode
314 314 #beaker.cache.auth_plugins.url = mysql://root:secret@127.0.0.1/rhodecode
315 315 #beaker.cache.auth_plugins.sa.pool_recycle = 3600
316 316 #beaker.cache.auth_plugins.sa.pool_size = 10
317 317 #beaker.cache.auth_plugins.sa.max_overflow = 0
318 318
319 319 beaker.cache.repo_cache_long.type = memorylru_base
320 320 beaker.cache.repo_cache_long.max_items = 4096
321 321 beaker.cache.repo_cache_long.expire = 2592000
322 322
323 323 ## default is memorylru_base cache, configure only if required
324 324 ## using multi-node or multi-worker setup
325 325 #beaker.cache.repo_cache_long.type = ext:memcached
326 326 #beaker.cache.repo_cache_long.url = localhost:11211
327 327 #beaker.cache.repo_cache_long.expire = 1209600
328 328 #beaker.cache.repo_cache_long.key_length = 256
329 329
330 330 ####################################
331 331 ### BEAKER SESSION ####
332 332 ####################################
333 333
334 334 ## .session.type is type of storage options for the session, current allowed
335 335 ## types are file, ext:memcached, ext:database, and memory (default).
336 336 beaker.session.type = file
337 337 beaker.session.data_dir = %(here)s/rc/data/sessions/data
338 338
339 339 ## db based session, fast, and allows easy management over logged in users
340 340 #beaker.session.type = ext:database
341 341 #beaker.session.table_name = db_session
342 342 #beaker.session.sa.url = postgresql://postgres:secret@localhost/rhodecode
343 343 #beaker.session.sa.url = mysql://root:secret@127.0.0.1/rhodecode
344 344 #beaker.session.sa.pool_recycle = 3600
345 345 #beaker.session.sa.echo = false
346 346
347 347 beaker.session.key = rhodecode
348 348 beaker.session.secret = test-rc-uytcxaz
349 349 beaker.session.lock_dir = %(here)s/rc/data/sessions/lock
350 350
351 351 ## Secure encrypted cookie. Requires AES and AES python libraries
352 352 ## you must disable beaker.session.secret to use this
353 353 #beaker.session.encrypt_key = key_for_encryption
354 354 #beaker.session.validate_key = validation_key
355 355
356 356 ## sets session as invalid(also logging out user) if it haven not been
357 357 ## accessed for given amount of time in seconds
358 358 beaker.session.timeout = 2592000
359 359 beaker.session.httponly = true
360 360 ## Path to use for the cookie. Set to prefix if you use prefix middleware
361 361 #beaker.session.cookie_path = /custom_prefix
362 362
363 363 ## uncomment for https secure cookie
364 364 beaker.session.secure = false
365 365
366 366 ## auto save the session to not to use .save()
367 367 beaker.session.auto = false
368 368
369 369 ## default cookie expiration time in seconds, set to `true` to set expire
370 370 ## at browser close
371 371 #beaker.session.cookie_expires = 3600
372 372
373 373 ###################################
374 374 ## SEARCH INDEXING CONFIGURATION ##
375 375 ###################################
376 376 ## Full text search indexer is available in rhodecode-tools under
377 377 ## `rhodecode-tools index` command
378 378
379 379 ## WHOOSH Backend, doesn't require additional services to run
380 380 ## it works good with few dozen repos
381 381 search.module = rhodecode.lib.index.whoosh
382 382 search.location = %(here)s/data/index
383 383
384 384 ########################################
385 385 ### CHANNELSTREAM CONFIG ####
386 386 ########################################
387 387 ## channelstream enables persistent connections and live notification
388 388 ## in the system. It's also used by the chat system
389 389
390 390 channelstream.enabled = false
391 391
392 392 ## server address for channelstream server on the backend
393 393 channelstream.server = 127.0.0.1:9800
394 394 ## location of the channelstream server from outside world
395 395 ## use ws:// for http or wss:// for https. This address needs to be handled
396 396 ## by external HTTP server such as Nginx or Apache
397 397 ## see nginx/apache configuration examples in our docs
398 398 channelstream.ws_url = ws://rhodecode.yourserver.com/_channelstream
399 399 channelstream.secret = secret
400 400 channelstream.history.location = %(here)s/channelstream_history
401 401
402 402 ## Internal application path that Javascript uses to connect into.
403 403 ## If you use proxy-prefix the prefix should be added before /_channelstream
404 404 channelstream.proxy_path = /_channelstream
405 405
406 406
407 407 ###################################
408 408 ## APPENLIGHT CONFIG ##
409 409 ###################################
410 410
411 411 ## Appenlight is tailored to work with RhodeCode, see
412 412 ## http://appenlight.com for details how to obtain an account
413 413
414 414 ## appenlight integration enabled
415 415 appenlight = false
416 416
417 417 appenlight.server_url = https://api.appenlight.com
418 418 appenlight.api_key = YOUR_API_KEY
419 419 #appenlight.transport_config = https://api.appenlight.com?threaded=1&timeout=5
420 420
421 421 # used for JS client
422 422 appenlight.api_public_key = YOUR_API_PUBLIC_KEY
423 423
424 424 ## TWEAK AMOUNT OF INFO SENT HERE
425 425
426 426 ## enables 404 error logging (default False)
427 427 appenlight.report_404 = false
428 428
429 429 ## time in seconds after request is considered being slow (default 1)
430 430 appenlight.slow_request_time = 1
431 431
432 432 ## record slow requests in application
433 433 ## (needs to be enabled for slow datastore recording and time tracking)
434 434 appenlight.slow_requests = true
435 435
436 436 ## enable hooking to application loggers
437 437 appenlight.logging = true
438 438
439 439 ## minimum log level for log capture
440 440 appenlight.logging.level = WARNING
441 441
442 442 ## send logs only from erroneous/slow requests
443 443 ## (saves API quota for intensive logging)
444 444 appenlight.logging_on_error = false
445 445
446 446 ## list of additonal keywords that should be grabbed from environ object
447 447 ## can be string with comma separated list of words in lowercase
448 448 ## (by default client will always send following info:
449 449 ## 'REMOTE_USER', 'REMOTE_ADDR', 'SERVER_NAME', 'CONTENT_TYPE' + all keys that
450 450 ## start with HTTP* this list be extended with additional keywords here
451 451 appenlight.environ_keys_whitelist =
452 452
453 453 ## list of keywords that should be blanked from request object
454 454 ## can be string with comma separated list of words in lowercase
455 455 ## (by default client will always blank keys that contain following words
456 456 ## 'password', 'passwd', 'pwd', 'auth_tkt', 'secret', 'csrf'
457 457 ## this list be extended with additional keywords set here
458 458 appenlight.request_keys_blacklist =
459 459
460 460 ## list of namespaces that should be ignores when gathering log entries
461 461 ## can be string with comma separated list of namespaces
462 462 ## (by default the client ignores own entries: appenlight_client.client)
463 463 appenlight.log_namespace_blacklist =
464 464
465 465
466 466 ################################################################################
467 467 ## WARNING: *THE LINE BELOW MUST BE UNCOMMENTED ON A PRODUCTION ENVIRONMENT* ##
468 468 ## Debug mode will enable the interactive debugging tool, allowing ANYONE to ##
469 469 ## execute malicious code after an exception is raised. ##
470 470 ################################################################################
471 471 set debug = false
472 472
473 473
474 474 ##############
475 475 ## STYLING ##
476 476 ##############
477 477 debug_style = false
478 478
479 479 ###########################################
480 480 ### MAIN RHODECODE DATABASE CONFIG ###
481 481 ###########################################
482 482 #sqlalchemy.db1.url = sqlite:///%(here)s/rhodecode_test.db?timeout=30
483 483 #sqlalchemy.db1.url = postgresql://postgres:qweqwe@localhost/rhodecode_test
484 484 #sqlalchemy.db1.url = mysql://root:qweqwe@localhost/rhodecode_test
485 485 sqlalchemy.db1.url = sqlite:///%(here)s/rhodecode_test.db?timeout=30
486 486
487 487 # see sqlalchemy docs for other advanced settings
488 488
489 489 ## print the sql statements to output
490 490 sqlalchemy.db1.echo = false
491 491 ## recycle the connections after this amount of seconds
492 492 sqlalchemy.db1.pool_recycle = 3600
493 493 sqlalchemy.db1.convert_unicode = true
494 494
495 495 ## the number of connections to keep open inside the connection pool.
496 496 ## 0 indicates no limit
497 497 #sqlalchemy.db1.pool_size = 5
498 498
499 499 ## the number of connections to allow in connection pool "overflow", that is
500 500 ## connections that can be opened above and beyond the pool_size setting,
501 501 ## which defaults to five.
502 502 #sqlalchemy.db1.max_overflow = 10
503 503
504 504
505 505 ##################
506 506 ### VCS CONFIG ###
507 507 ##################
508 508 vcs.server.enable = true
509 509 vcs.server = localhost:9901
510 510
511 511 ## Web server connectivity protocol, responsible for web based VCS operatations
512 512 ## Available protocols are:
513 513 ## `http` - use http-rpc backend (default)
514 514 vcs.server.protocol = http
515 515
516 516 ## Push/Pull operations protocol, available options are:
517 517 ## `http` - use http-rpc backend (default)
518 518 ## `vcsserver.scm_app` - internal app (EE only)
519 519 vcs.scm_app_implementation = http
520 520
521 521 ## Push/Pull operations hooks protocol, available options are:
522 522 ## `http` - use http-rpc backend (default)
523 523 vcs.hooks.protocol = http
524 vcs.hooks.host = 127.0.0.1
524 525
525 526 vcs.server.log_level = debug
526 527 ## Start VCSServer with this instance as a subprocess, usefull for development
527 528 vcs.start_server = false
528 529
529 530 ## List of enabled VCS backends, available options are:
530 531 ## `hg` - mercurial
531 532 ## `git` - git
532 533 ## `svn` - subversion
533 534 vcs.backends = hg, git, svn
534 535
535 536 vcs.connection_timeout = 3600
536 537 ## Compatibility version when creating SVN repositories. Defaults to newest version when commented out.
537 538 ## Available options are: pre-1.4-compatible, pre-1.5-compatible, pre-1.6-compatible, pre-1.8-compatible, pre-1.9-compatible
538 539 #vcs.svn.compatible_version = pre-1.8-compatible
539 540
540 541
541 542 ############################################################
542 543 ### Subversion proxy support (mod_dav_svn) ###
543 544 ### Maps RhodeCode repo groups into SVN paths for Apache ###
544 545 ############################################################
545 546 ## Enable or disable the config file generation.
546 547 svn.proxy.generate_config = false
547 548 ## Generate config file with `SVNListParentPath` set to `On`.
548 549 svn.proxy.list_parent_path = true
549 550 ## Set location and file name of generated config file.
550 551 svn.proxy.config_file_path = %(here)s/mod_dav_svn.conf
551 552 ## Used as a prefix to the `Location` block in the generated config file.
552 553 ## In most cases it should be set to `/`.
553 554 svn.proxy.location_root = /
554 555 ## Command to reload the mod dav svn configuration on change.
555 556 ## Example: `/etc/init.d/apache2 reload`
556 557 #svn.proxy.reload_cmd = /etc/init.d/apache2 reload
557 558 ## If the timeout expires before the reload command finishes, the command will
558 559 ## be killed. Setting it to zero means no timeout. Defaults to 10 seconds.
559 560 #svn.proxy.reload_timeout = 10
560 561
561 562 ############################################################
562 563 ### SSH Support Settings ###
563 564 ############################################################
564 565
565 566 ## Defines if the authorized_keys file should be written on any change of
566 567 ## user ssh keys, setting this to false also disables posibility of adding
567 568 ## ssh keys for users from web interface.
568 569 ssh.generate_authorized_keyfile = true
569 570
570 571 ## Options for ssh, default is `no-pty,no-port-forwarding,no-X11-forwarding,no-agent-forwarding`
571 572 # ssh.authorized_keys_ssh_opts =
572 573
573 574 ## File to generate the authorized keys together with options
574 575 ## It is possible to have multiple key files specified in `sshd_config` e.g.
575 576 ## AuthorizedKeysFile %h/.ssh/authorized_keys %h/.ssh/authorized_keys_rhodecode
576 577 ssh.authorized_keys_file_path = %(here)s/rc/authorized_keys_rhodecode
577 578
578 579 ## Command to execute the SSH wrapper. The binary is available in the
579 580 ## rhodecode installation directory.
580 581 ## e.g ~/.rccontrol/community-1/profile/bin/rc-ssh-wrapper
581 582 ssh.wrapper_cmd = ~/.rccontrol/community-1/rc-ssh-wrapper
582 583
583 584 ## Allow shell when executing the ssh-wrapper command
584 585 ssh.wrapper_cmd_allow_shell = false
585 586
586 587 ## Enables logging, and detailed output send back to the client. Usefull for
587 588 ## debugging, shouldn't be used in production.
588 589 ssh.enable_debug_logging = false
589 590
590 591 ## Paths to binary executrables, by default they are the names, but we can
591 592 ## override them if we want to use a custom one
592 593 ssh.executable.hg = ~/.rccontrol/vcsserver-1/profile/bin/hg
593 594 ssh.executable.git = ~/.rccontrol/vcsserver-1/profile/bin/git
594 595 ssh.executable.svn = ~/.rccontrol/vcsserver-1/profile/bin/svnserve
595 596
596 597
597 598 ## Dummy marker to add new entries after.
598 599 ## Add any custom entries below. Please don't remove.
599 600 custom.conf = 1
600 601
601 602
602 603 ################################
603 604 ### LOGGING CONFIGURATION ####
604 605 ################################
605 606 [loggers]
606 607 keys = root, sqlalchemy, beaker, rhodecode, ssh_wrapper
607 608
608 609 [handlers]
609 610 keys = console, console_sql
610 611
611 612 [formatters]
612 613 keys = generic, color_formatter, color_formatter_sql
613 614
614 615 #############
615 616 ## LOGGERS ##
616 617 #############
617 618 [logger_root]
618 619 level = NOTSET
619 620 handlers = console
620 621
621 622 [logger_routes]
622 623 level = DEBUG
623 624 handlers =
624 625 qualname = routes.middleware
625 626 ## "level = DEBUG" logs the route matched and routing variables.
626 627 propagate = 1
627 628
628 629 [logger_beaker]
629 630 level = DEBUG
630 631 handlers =
631 632 qualname = beaker.container
632 633 propagate = 1
633 634
634 635 [logger_rhodecode]
635 636 level = DEBUG
636 637 handlers =
637 638 qualname = rhodecode
638 639 propagate = 1
639 640
640 641 [logger_sqlalchemy]
641 642 level = ERROR
642 643 handlers = console_sql
643 644 qualname = sqlalchemy.engine
644 645 propagate = 0
645 646
646 647 [logger_ssh_wrapper]
647 648 level = DEBUG
648 649 handlers =
649 650 qualname = ssh_wrapper
650 651 propagate = 1
651 652
652 653
653 654 ##############
654 655 ## HANDLERS ##
655 656 ##############
656 657
657 658 [handler_console]
658 659 class = StreamHandler
659 660 args = (sys.stderr,)
660 661 level = DEBUG
661 662 formatter = generic
662 663
663 664 [handler_console_sql]
664 665 class = StreamHandler
665 666 args = (sys.stderr,)
666 667 level = WARN
667 668 formatter = generic
668 669
669 670 ################
670 671 ## FORMATTERS ##
671 672 ################
672 673
673 674 [formatter_generic]
674 675 class = rhodecode.lib.logging_formatter.ExceptionAwareFormatter
675 676 format = %(asctime)s.%(msecs)03d %(levelname)-5.5s [%(name)s] %(message)s
676 677 datefmt = %Y-%m-%d %H:%M:%S
677 678
678 679 [formatter_color_formatter]
679 680 class = rhodecode.lib.logging_formatter.ColorFormatter
680 681 format = %(asctime)s.%(msecs)03d %(levelname)-5.5s [%(name)s] %(message)s
681 682 datefmt = %Y-%m-%d %H:%M:%S
682 683
683 684 [formatter_color_formatter_sql]
684 685 class = rhodecode.lib.logging_formatter.ColorFormatterSql
685 686 format = %(asctime)s.%(msecs)03d %(levelname)-5.5s [%(name)s] %(message)s
686 687 datefmt = %Y-%m-%d %H:%M:%S
General Comments 0
You need to be logged in to leave comments. Login now