LCOV - code coverage report
Current view: top level - source3/lib - g_lock.c
Test: coverage report for vadcx-master-patch-75612 fe003de8
Date: 2024-02-29 22:57:05
Coverage:     Lines: 690 / 880 (78.4 %)    Functions: 44 / 47 (93.6 %)

          Line data    Source code
       1             : /*
       2             :    Unix SMB/CIFS implementation.
       3             :    global locks based on dbwrap and messaging
       4             :    Copyright (C) 2009 by Volker Lendecke
       5             : 
       6             :    This program is free software; you can redistribute it and/or modify
       7             :    it under the terms of the GNU General Public License as published by
       8             :    the Free Software Foundation; either version 3 of the License, or
       9             :    (at your option) any later version.
      10             : 
      11             :    This program is distributed in the hope that it will be useful,
      12             :    but WITHOUT ANY WARRANTY; without even the implied warranty of
      13             :    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
      14             :    GNU General Public License for more details.
      15             : 
      16             :    You should have received a copy of the GNU General Public License
      17             :    along with this program.  If not, see <http://www.gnu.org/licenses/>.
      18             : */
      19             : 
      20             : #include "replace.h"
      21             : #include "system/filesys.h"
      22             : #include "lib/util/server_id.h"
      23             : #include "lib/util/debug.h"
      24             : #include "lib/util/talloc_stack.h"
      25             : #include "lib/util/samba_util.h"
      26             : #include "lib/util_path.h"
      27             : #include "dbwrap/dbwrap.h"
      28             : #include "dbwrap/dbwrap_open.h"
      29             : #include "dbwrap/dbwrap_watch.h"
      30             : #include "g_lock.h"
      31             : #include "util_tdb.h"
      32             : #include "../lib/util/tevent_ntstatus.h"
      33             : #include "messages.h"
      34             : #include "serverid.h"
      35             : 
      36             : struct g_lock_ctx {
      37             :         struct db_context *db;
      38             :         struct messaging_context *msg;
      39             :         enum dbwrap_lock_order lock_order;
      40             :         bool busy;
      41             : };
      42             : 
      43             : struct g_lock {
      44             :         struct server_id exclusive;
      45             :         size_t num_shared;
      46             :         uint8_t *shared;
      47             :         uint64_t unique_lock_epoch;
      48             :         uint64_t unique_data_epoch;
      49             :         size_t datalen;
      50             :         uint8_t *data;
      51             : };
      52             : 
      53     1879844 : static bool g_lock_parse(uint8_t *buf, size_t buflen, struct g_lock *lck)
      54             : {
      55        4909 :         struct server_id exclusive;
      56        4909 :         size_t num_shared, shared_len;
      57        4909 :         uint64_t unique_lock_epoch;
      58        4909 :         uint64_t unique_data_epoch;
      59             : 
      60     1879844 :         if (buflen < (SERVER_ID_BUF_LENGTH + /* exclusive */
      61             :                       sizeof(uint64_t) +     /* seqnum */
      62             :                       sizeof(uint32_t))) {   /* num_shared */
      63     1246482 :                 struct g_lock ret = {
      64             :                         .exclusive.pid = 0,
      65      415494 :                         .unique_lock_epoch = generate_unique_u64(0),
      66      415494 :                         .unique_data_epoch = generate_unique_u64(0),
      67             :                 };
      68      415494 :                 *lck = ret;
      69      415494 :                 return true;
      70             :         }
      71             : 
      72     1464350 :         server_id_get(&exclusive, buf);
      73     1464350 :         buf += SERVER_ID_BUF_LENGTH;
      74     1464350 :         buflen -= SERVER_ID_BUF_LENGTH;
      75             : 
      76     1464350 :         unique_lock_epoch = BVAL(buf, 0);
      77     1464350 :         buf += sizeof(uint64_t);
      78     1464350 :         buflen -= sizeof(uint64_t);
      79             : 
      80     1464350 :         unique_data_epoch = BVAL(buf, 0);
      81     1464350 :         buf += sizeof(uint64_t);
      82     1464350 :         buflen -= sizeof(uint64_t);
      83             : 
      84     1464350 :         num_shared = IVAL(buf, 0);
      85     1464350 :         buf += sizeof(uint32_t);
      86     1464350 :         buflen -= sizeof(uint32_t);
      87             : 
      88     1464350 :         if (num_shared > buflen/SERVER_ID_BUF_LENGTH) {
      89           0 :                 DBG_DEBUG("num_shared=%zu, buflen=%zu\n",
      90             :                           num_shared,
      91             :                           buflen);
      92           0 :                 return false;
      93             :         }
      94             : 
      95     1464350 :         shared_len = num_shared * SERVER_ID_BUF_LENGTH;
      96             : 
      97     1464350 :         *lck = (struct g_lock) {
      98             :                 .exclusive = exclusive,
      99             :                 .num_shared = num_shared,
     100             :                 .shared = buf,
     101             :                 .unique_lock_epoch = unique_lock_epoch,
     102             :                 .unique_data_epoch = unique_data_epoch,
     103     1464350 :                 .datalen = buflen-shared_len,
     104     1464350 :                 .data = buf+shared_len,
     105             :         };
     106             : 
     107     1464350 :         return true;
     108             : }
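
For orientation, a minimal standalone sketch (not part of g_lock.c) of the serialized record layout that g_lock_parse() consumes and g_lock_store() later writes back: the exclusive holder, the two epochs, the shared-holder count, the shared holder ids, and the opaque user data. The 24-byte SERVER_ID_BUF_LENGTH is an assumption for illustration; the authoritative value comes from lib/util/server_id.h.

/* Hypothetical helper, for illustration only: prints the byte layout
 * of a serialized g_lock record for a given num_shared and datalen. */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

#define SERVER_ID_BUF_LENGTH 24 /* assumed value, see lib/util/server_id.h */

static void g_lock_print_layout(size_t num_shared, size_t datalen)
{
        size_t off = 0;

        printf("exclusive holder : offset %3zu, %2d bytes\n",
               off, SERVER_ID_BUF_LENGTH);
        off += SERVER_ID_BUF_LENGTH;

        printf("unique_lock_epoch: offset %3zu, %2zu bytes\n",
               off, sizeof(uint64_t));
        off += sizeof(uint64_t);

        printf("unique_data_epoch: offset %3zu, %2zu bytes\n",
               off, sizeof(uint64_t));
        off += sizeof(uint64_t);

        printf("num_shared       : offset %3zu, %2zu bytes\n",
               off, sizeof(uint32_t));
        off += sizeof(uint32_t);

        printf("shared holders   : offset %3zu, %2zu bytes\n",
               off, num_shared * SERVER_ID_BUF_LENGTH);
        off += num_shared * SERVER_ID_BUF_LENGTH;

        printf("user data        : offset %3zu, %2zu bytes\n", off, datalen);
}

int main(void)
{
        g_lock_print_layout(2, 16);
        return 0;
}

Compiled standalone, this prints the offsets for a record with two shared holders and 16 bytes of user data; too-short buffers are treated by g_lock_parse() as an empty, unlocked record.
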
     109             : 
     110          76 : static void g_lock_get_shared(const struct g_lock *lck,
     111             :                               size_t i,
     112             :                               struct server_id *shared)
     113             : {
     114          69 :         if (i >= lck->num_shared) {
     115           0 :                 abort();
     116             :         }
     117          74 :         server_id_get(shared, lck->shared + i*SERVER_ID_BUF_LENGTH);
     118          69 : }
     119             : 
     120          17 : static void g_lock_del_shared(struct g_lock *lck, size_t i)
     121             : {
     122          17 :         if (i >= lck->num_shared) {
     123           0 :                 abort();
     124             :         }
     125          17 :         lck->num_shared -= 1;
     126          17 :         if (i < lck->num_shared) {
     127          21 :                 memcpy(lck->shared + i*SERVER_ID_BUF_LENGTH,
     128           4 :                        lck->shared + lck->num_shared*SERVER_ID_BUF_LENGTH,
     129             :                        SERVER_ID_BUF_LENGTH);
     130             :         }
     131          17 : }
     132             : 
     133     1187434 : static NTSTATUS g_lock_store(
     134             :         struct db_record *rec,
     135             :         struct g_lock *lck,
     136             :         struct server_id *new_shared,
     137             :         const TDB_DATA *new_dbufs,
     138             :         size_t num_new_dbufs)
     139     1187434 : {
     140        2509 :         uint8_t exclusive[SERVER_ID_BUF_LENGTH];
     141        2509 :         uint8_t seqnum_buf[sizeof(uint64_t)*2];
     142        2509 :         uint8_t sizebuf[sizeof(uint32_t)];
     143        2509 :         uint8_t new_shared_buf[SERVER_ID_BUF_LENGTH];
     144             : 
     145     1187434 :         struct TDB_DATA dbufs[6 + num_new_dbufs];
     146             : 
     147     1187434 :         dbufs[0] = (TDB_DATA) {
     148             :                 .dptr = exclusive, .dsize = sizeof(exclusive),
     149             :         };
     150     1187434 :         dbufs[1] = (TDB_DATA) {
     151             :                 .dptr = seqnum_buf, .dsize = sizeof(seqnum_buf),
     152             :         };
     153     1187434 :         dbufs[2] = (TDB_DATA) {
     154             :                 .dptr = sizebuf, .dsize = sizeof(sizebuf),
     155             :         };
     156     1187434 :         dbufs[3] = (TDB_DATA) {
     157     1187434 :                 .dptr = lck->shared,
     158     1187434 :                 .dsize = lck->num_shared * SERVER_ID_BUF_LENGTH,
     159             :         };
     160     1187434 :         dbufs[4] = (TDB_DATA) { 0 };
     161     1187434 :         dbufs[5] = (TDB_DATA) {
     162     1187434 :                 .dptr = lck->data, .dsize = lck->datalen,
     163             :         };
     164             : 
     165     1187434 :         if (num_new_dbufs != 0) {
     166        1937 :                 memcpy(&dbufs[6],
     167             :                        new_dbufs,
     168             :                        num_new_dbufs * sizeof(TDB_DATA));
     169             :         }
     170             : 
     171     1187434 :         server_id_put(exclusive, lck->exclusive);
     172     1187434 :         SBVAL(seqnum_buf, 0, lck->unique_lock_epoch);
     173     1187434 :         SBVAL(seqnum_buf, 8, lck->unique_data_epoch);
     174             : 
     175     1187434 :         if (new_shared != NULL) {
     176          18 :                 if (lck->num_shared >= UINT32_MAX) {
     177           0 :                         return NT_STATUS_BUFFER_OVERFLOW;
     178             :                 }
     179             : 
     180          18 :                 server_id_put(new_shared_buf, *new_shared);
     181             : 
     182          18 :                 dbufs[4] = (TDB_DATA) {
     183             :                         .dptr = new_shared_buf,
     184             :                         .dsize = sizeof(new_shared_buf),
     185             :                 };
     186             : 
     187          18 :                 lck->num_shared += 1;
     188             :         }
     189             : 
     190     1187434 :         SIVAL(sizebuf, 0, lck->num_shared);
     191             : 
     192     1187434 :         return dbwrap_record_storev(rec, dbufs, ARRAY_SIZE(dbufs), 0);
     193             : }
     194             : 
     195         354 : struct g_lock_ctx *g_lock_ctx_init_backend(
     196             :         TALLOC_CTX *mem_ctx,
     197             :         struct messaging_context *msg,
     198             :         struct db_context **backend)
     199             : {
     200          21 :         struct g_lock_ctx *result;
     201             : 
     202         354 :         result = talloc_zero(mem_ctx, struct g_lock_ctx);
     203         354 :         if (result == NULL) {
     204           0 :                 return NULL;
     205             :         }
     206         354 :         result->msg = msg;
     207         354 :         result->lock_order = DBWRAP_LOCK_ORDER_NONE;
     208             : 
     209         354 :         result->db = db_open_watched(result, backend, msg);
     210         354 :         if (result->db == NULL) {
     211           0 :                 DBG_WARNING("db_open_watched failed\n");
     212           0 :                 TALLOC_FREE(result);
     213           0 :                 return NULL;
     214             :         }
     215         333 :         return result;
     216             : }
     217             : 
     218         195 : void g_lock_set_lock_order(struct g_lock_ctx *ctx,
     219             :                            enum dbwrap_lock_order lock_order)
     220             : {
     221         195 :         ctx->lock_order = lock_order;
     222         195 : }
     223             : 
     224         159 : struct g_lock_ctx *g_lock_ctx_init(TALLOC_CTX *mem_ctx,
     225             :                                    struct messaging_context *msg)
     226             : {
     227         159 :         char *db_path = NULL;
     228         159 :         struct db_context *backend = NULL;
     229         159 :         struct g_lock_ctx *ctx = NULL;
     230             : 
     231         159 :         db_path = lock_path(mem_ctx, "g_lock.tdb");
     232         159 :         if (db_path == NULL) {
     233           0 :                 return NULL;
     234             :         }
     235             : 
     236         159 :         backend = db_open(
     237             :                 mem_ctx,
     238             :                 db_path,
     239             :                 0,
     240             :                 TDB_CLEAR_IF_FIRST|TDB_INCOMPATIBLE_HASH|TDB_VOLATILE,
     241             :                 O_RDWR|O_CREAT,
     242             :                 0600,
     243             :                 DBWRAP_LOCK_ORDER_3,
     244             :                 DBWRAP_FLAG_NONE);
     245         159 :         TALLOC_FREE(db_path);
     246         159 :         if (backend == NULL) {
     247           0 :                 DBG_WARNING("Could not open g_lock.tdb\n");
     248           0 :                 return NULL;
     249             :         }
     250             : 
     251         159 :         ctx = g_lock_ctx_init_backend(mem_ctx, msg, &backend);
     252         159 :         return ctx;
     253             : }
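
A hedged caller-side sketch of the two entry points above; the talloc and messaging contexts are assumed to come from the caller, and the concrete DBWRAP_LOCK_ORDER_1 value is only an illustrative choice.

/* Hypothetical setup helper, for illustration only. */
static struct g_lock_ctx *setup_g_lock(TALLOC_CTX *mem_ctx,
                                       struct messaging_context *msg)
{
        struct g_lock_ctx *ctx;

        ctx = g_lock_ctx_init(mem_ctx, msg);
        if (ctx == NULL) {
                DBG_WARNING("g_lock_ctx_init failed\n");
                return NULL;
        }

        /*
         * Optionally tell dbwrap's lock-order checking at which
         * level this context will be taken.
         */
        g_lock_set_lock_order(ctx, DBWRAP_LOCK_ORDER_1);

        return ctx;
}
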
     254             : 
     255         318 : static void g_lock_cleanup_dead(
     256             :         struct g_lock *lck,
     257             :         struct server_id *dead_blocker)
     258             : {
     259          14 :         bool exclusive_died;
     260          14 :         struct server_id_buf tmp;
     261             : 
     262         318 :         if (dead_blocker == NULL) {
     263         315 :                 return;
     264             :         }
     265             : 
     266           3 :         exclusive_died = server_id_equal(dead_blocker, &lck->exclusive);
     267             : 
     268           3 :         if (exclusive_died) {
     269           1 :                 DBG_DEBUG("Exclusive holder %s died\n",
     270             :                           server_id_str_buf(lck->exclusive, &tmp));
     271           1 :                 lck->exclusive.pid = 0;
     272             :         }
     273             : 
     274           3 :         if (lck->num_shared != 0) {
     275           2 :                 bool shared_died;
     276           2 :                 struct server_id shared;
     277             : 
     278           2 :                 g_lock_get_shared(lck, 0, &shared);
     279           2 :                 shared_died = server_id_equal(dead_blocker, &shared);
     280             : 
     281           2 :                 if (shared_died) {
     282           2 :                         DBG_DEBUG("Shared holder %s died\n",
     283             :                                   server_id_str_buf(shared, &tmp));
     284           2 :                         g_lock_del_shared(lck, 0);
     285             :                 }
     286             :         }
     287             : }
     288             : 
     289         301 : static ssize_t g_lock_find_shared(
     290             :         struct g_lock *lck,
     291             :         const struct server_id *self)
     292             : {
     293          11 :         size_t i;
     294             : 
     295         310 :         for (i=0; i<lck->num_shared; i++) {
     296          13 :                 struct server_id shared;
     297          13 :                 bool same;
     298             : 
     299          13 :                 g_lock_get_shared(lck, i, &shared);
     300             : 
     301          13 :                 same = server_id_equal(self, &shared);
     302          13 :                 if (same) {
     303           4 :                         return i;
     304             :                 }
     305             :         }
     306             : 
     307         290 :         return -1;
     308             : }
     309             : 
     310         317 : static void g_lock_cleanup_shared(struct g_lock *lck)
     311             : {
     312          27 :         size_t i;
     313          27 :         struct server_id check;
     314          27 :         bool exists;
     315             : 
     316         317 :         if (lck->num_shared == 0) {
     317         299 :                 return;
     318             :         }
     319             : 
     320             :         /*
      321             :          * Read locks can stay around forever if their holder dies. Do
      322             :          * a heuristic check for process existence: check one randomly
      323             :          * chosen shared holder. Hopefully this keeps runaway
      324             :          * read locks under control.
     325             :          */
     326          18 :         i = generate_random() % lck->num_shared;
     327          18 :         g_lock_get_shared(lck, i, &check);
     328             : 
     329          18 :         exists = serverid_exists(&check);
     330          18 :         if (!exists) {
     331           6 :                 struct server_id_buf tmp;
     332           6 :                 DBG_DEBUG("Shared locker %s died -- removing\n",
     333             :                           server_id_str_buf(check, &tmp));
     334           6 :                 g_lock_del_shared(lck, i);
     335             :         }
     336             : }
     337             : 
     338             : struct g_lock_lock_cb_state {
     339             :         struct g_lock_ctx *ctx;
     340             :         struct db_record *rec;
     341             :         struct g_lock *lck;
     342             :         struct server_id *new_shared;
     343             :         g_lock_lock_cb_fn_t cb_fn;
     344             :         void *cb_private;
     345             :         TALLOC_CTX *update_mem_ctx;
     346             :         TDB_DATA updated_data;
     347             :         bool existed;
     348             :         bool modified;
     349             :         bool unlock;
     350             : };
     351             : 
     352     3255474 : NTSTATUS g_lock_lock_cb_dump(struct g_lock_lock_cb_state *cb_state,
     353             :                              void (*fn)(struct server_id exclusive,
     354             :                                         size_t num_shared,
     355             :                                         const struct server_id *shared,
     356             :                                         const uint8_t *data,
     357             :                                         size_t datalen,
     358             :                                         void *private_data),
     359             :                              void *private_data)
     360             : {
     361     3255474 :         struct g_lock *lck = cb_state->lck;
     362             : 
      363             :         /* We allow a cb_fn only for G_LOCK_WRITE for now... */
     364     3255474 :         SMB_ASSERT(lck->num_shared == 0);
     365             : 
     366     3255474 :         fn(lck->exclusive,
     367             :            0, /* num_shared */
     368             :            NULL, /* shared */
     369     3255474 :            lck->data,
     370             :            lck->datalen,
     371             :            private_data);
     372             : 
     373     3255474 :         return NT_STATUS_OK;
     374             : }
     375             : 
     376      756218 : NTSTATUS g_lock_lock_cb_writev(struct g_lock_lock_cb_state *cb_state,
     377             :                                const TDB_DATA *dbufs,
     378             :                                size_t num_dbufs)
     379             : {
     380        1915 :         NTSTATUS status;
     381             : 
     382      756218 :         status = dbwrap_merge_dbufs(&cb_state->updated_data,
     383             :                                     cb_state->update_mem_ctx,
     384             :                                     dbufs, num_dbufs);
     385      756218 :         if (!NT_STATUS_IS_OK(status)) {
     386           0 :                 return status;
     387             :         }
     388             : 
     389      756218 :         cb_state->modified = true;
     390      756218 :         cb_state->lck->data = cb_state->updated_data.dptr;
     391      756218 :         cb_state->lck->datalen = cb_state->updated_data.dsize;
     392             : 
     393      756218 :         return NT_STATUS_OK;
     394             : }
     395             : 
     396      457726 : void g_lock_lock_cb_unlock(struct g_lock_lock_cb_state *cb_state)
     397             : {
     398      457726 :         cb_state->unlock = true;
     399      457726 : }
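
A reduced sketch of how a callback might use the three helpers above to update the record's user data and then drop the lock; the callback signature is inferred from the cb_fn call site in g_lock_lock_cb_run_and_store() below, and the payload is made up for illustration.

/* Hypothetical g_lock callback, for illustration only. */
static void my_update_cb(struct g_lock_lock_cb_state *cb_state,
                         void *private_data)
{
        uint8_t payload[] = { 0xde, 0xad, 0xbe, 0xef };
        TDB_DATA dbuf = {
                .dptr = payload, .dsize = sizeof(payload),
        };
        NTSTATUS status;

        /* replace the record's user data while holding the lock */
        status = g_lock_lock_cb_writev(cb_state, &dbuf, 1);
        if (!NT_STATUS_IS_OK(status)) {
                return;
        }

        /* the lock was only needed for this one update */
        g_lock_lock_cb_unlock(cb_state);
}
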
     400             : 
     401             : struct g_lock_lock_cb_watch_data_state {
     402             :         struct tevent_context *ev;
     403             :         struct g_lock_ctx *ctx;
     404             :         TDB_DATA key;
     405             :         struct server_id blocker;
     406             :         bool blockerdead;
     407             :         uint64_t unique_lock_epoch;
     408             :         uint64_t unique_data_epoch;
     409             :         uint64_t watch_instance;
     410             :         NTSTATUS status;
     411             : };
     412             : 
     413             : static void g_lock_lock_cb_watch_data_done(struct tevent_req *subreq);
     414             : 
     415         499 : struct tevent_req *g_lock_lock_cb_watch_data_send(
     416             :         TALLOC_CTX *mem_ctx,
     417             :         struct tevent_context *ev,
     418             :         struct g_lock_lock_cb_state *cb_state,
     419             :         struct server_id blocker)
     420             : {
     421         499 :         struct tevent_req *req = NULL;
     422         499 :         struct g_lock_lock_cb_watch_data_state *state = NULL;
     423         499 :         struct tevent_req *subreq = NULL;
     424         499 :         TDB_DATA key = dbwrap_record_get_key(cb_state->rec);
     425             : 
     426         499 :         req = tevent_req_create(
     427             :                 mem_ctx, &state, struct g_lock_lock_cb_watch_data_state);
     428         499 :         if (req == NULL) {
     429           0 :                 return NULL;
     430             :         }
     431         499 :         state->ev = ev;
     432         499 :         state->ctx = cb_state->ctx;
     433         499 :         state->blocker = blocker;
     434             : 
     435         499 :         state->key = tdb_data_talloc_copy(state, key);
     436         499 :         if (tevent_req_nomem(state->key.dptr, req)) {
     437           0 :                 return tevent_req_post(req, ev);
     438             :         }
     439             : 
     440         499 :         state->unique_lock_epoch = cb_state->lck->unique_lock_epoch;
     441         499 :         state->unique_data_epoch = cb_state->lck->unique_data_epoch;
     442             : 
     443         499 :         DBG_DEBUG("state->unique_data_epoch=%"PRIu64"\n", state->unique_data_epoch);
     444             : 
     445         499 :         subreq = dbwrap_watched_watch_send(
     446         499 :                 state, state->ev, cb_state->rec, 0, state->blocker);
     447         499 :         if (tevent_req_nomem(subreq, req)) {
     448           0 :                 return tevent_req_post(req, ev);
     449             :         }
     450         499 :         tevent_req_set_callback(subreq, g_lock_lock_cb_watch_data_done, req);
     451             : 
     452         499 :         return req;
     453             : }
     454             : 
     455         713 : static void g_lock_lock_cb_watch_data_done_fn(
     456             :         struct db_record *rec,
     457             :         TDB_DATA value,
     458             :         void *private_data)
     459             : {
     460         713 :         struct tevent_req *req = talloc_get_type_abort(
     461             :                 private_data, struct tevent_req);
     462         713 :         struct g_lock_lock_cb_watch_data_state *state = tevent_req_data(
     463             :                 req, struct g_lock_lock_cb_watch_data_state);
     464         713 :         struct tevent_req *subreq = NULL;
     465           0 :         struct g_lock lck;
     466           0 :         bool ok;
     467             : 
     468         713 :         ok = g_lock_parse(value.dptr, value.dsize, &lck);
     469         713 :         if (!ok) {
     470           0 :                 dbwrap_watched_watch_remove_instance(rec, state->watch_instance);
     471           0 :                 state->status = NT_STATUS_INTERNAL_DB_CORRUPTION;
     472           0 :                 return;
     473             :         }
     474             : 
     475         713 :         if (lck.unique_data_epoch != state->unique_data_epoch) {
     476         447 :                 dbwrap_watched_watch_remove_instance(rec, state->watch_instance);
     477         447 :                 DBG_DEBUG("lck.unique_data_epoch=%"PRIu64", "
     478             :                           "state->unique_data_epoch=%"PRIu64"\n",
     479             :                           lck.unique_data_epoch,
     480             :                           state->unique_data_epoch);
     481         447 :                 state->status = NT_STATUS_OK;
     482         447 :                 return;
     483             :         }
     484             : 
     485             :         /*
      486             :          * If the lock epoch changed, we had better
      487             :          * remove ourselves from the waiter list
      488             :          * (most likely the first position)
      489             :          * and re-add ourselves at the end of the list.
      490             :          *
      491             :          * This gives other lock waiters a chance
      492             :          * to make progress.
      493             :          *
      494             :          * Otherwise we keep our waiter instance alive and
      495             :          * keep waiting (most likely at the first position).
     496             :          */
     497         266 :         if (lck.unique_lock_epoch != state->unique_lock_epoch) {
     498         242 :                 dbwrap_watched_watch_remove_instance(rec, state->watch_instance);
     499         242 :                 state->watch_instance = dbwrap_watched_watch_add_instance(rec);
     500         242 :                 state->unique_lock_epoch = lck.unique_lock_epoch;
     501             :         }
     502             : 
     503         266 :         subreq = dbwrap_watched_watch_send(
     504             :                 state, state->ev, rec, state->watch_instance, state->blocker);
     505         266 :         if (subreq == NULL) {
     506           0 :                 dbwrap_watched_watch_remove_instance(rec, state->watch_instance);
     507           0 :                 state->status = NT_STATUS_NO_MEMORY;
     508           0 :                 return;
     509             :         }
     510         266 :         tevent_req_set_callback(subreq, g_lock_lock_cb_watch_data_done, req);
     511             : 
     512         266 :         state->status = NT_STATUS_EVENT_PENDING;
     513             : }
     514             : 
     515         713 : static void g_lock_lock_cb_watch_data_done(struct tevent_req *subreq)
     516             : {
     517         713 :         struct tevent_req *req = tevent_req_callback_data(
     518             :                 subreq, struct tevent_req);
     519         713 :         struct g_lock_lock_cb_watch_data_state *state = tevent_req_data(
     520             :                 req, struct g_lock_lock_cb_watch_data_state);
     521           0 :         NTSTATUS status;
     522         713 :         uint64_t instance = 0;
     523             : 
     524         713 :         status = dbwrap_watched_watch_recv(
     525             :                 subreq, &instance, &state->blockerdead, &state->blocker);
     526         713 :         TALLOC_FREE(subreq);
     527         713 :         if (tevent_req_nterror(req, status)) {
     528           0 :                 DBG_DEBUG("dbwrap_watched_watch_recv returned %s\n",
     529             :                           nt_errstr(status));
     530         266 :                 return;
     531             :         }
     532             : 
     533         713 :         state->watch_instance = instance;
     534             : 
     535         713 :         status = dbwrap_do_locked(
     536         713 :                 state->ctx->db, state->key, g_lock_lock_cb_watch_data_done_fn, req);
     537         713 :         if (tevent_req_nterror(req, status)) {
     538           0 :                 DBG_DEBUG("dbwrap_do_locked returned %s\n", nt_errstr(status));
     539           0 :                 return;
     540             :         }
     541         713 :         if (NT_STATUS_EQUAL(state->status, NT_STATUS_EVENT_PENDING)) {
     542         266 :                 return;
     543             :         }
     544         447 :         if (tevent_req_nterror(req, state->status)) {
     545           0 :                 return;
     546             :         }
     547         447 :         tevent_req_done(req);
     548             : }
     549             : 
     550         447 : NTSTATUS g_lock_lock_cb_watch_data_recv(
     551             :         struct tevent_req *req,
     552             :         bool *blockerdead,
     553             :         struct server_id *blocker)
     554             : {
     555         447 :         struct g_lock_lock_cb_watch_data_state *state = tevent_req_data(
     556             :                 req, struct g_lock_lock_cb_watch_data_state);
     557           0 :         NTSTATUS status;
     558             : 
     559         447 :         if (tevent_req_is_nterror(req, &status)) {
     560           0 :                 return status;
     561             :         }
     562         447 :         if (blockerdead != NULL) {
     563         447 :                 *blockerdead = state->blockerdead;
     564             :         }
     565         447 :         if (blocker != NULL) {
     566         447 :                 *blocker = state->blocker;
     567             :         }
     568             : 
     569         447 :         return NT_STATUS_OK;
     570             : }
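
The two functions above form a tevent send/recv pair that lets a g_lock callback wait until the record's data epoch changes or the blocker dies. A reduced sketch of the wiring, with all names other than the two helpers being placeholders:

/* Hypothetical consumer of the watch_data pair, for illustration only. */
static void my_watch_done(struct tevent_req *subreq);

static bool my_start_watch(TALLOC_CTX *mem_ctx,
                           struct tevent_context *ev,
                           struct g_lock_lock_cb_state *cb_state,
                           struct server_id blocker)
{
        struct tevent_req *subreq;

        subreq = g_lock_lock_cb_watch_data_send(mem_ctx, ev, cb_state, blocker);
        if (subreq == NULL) {
                return false;
        }
        tevent_req_set_callback(subreq, my_watch_done, NULL);
        return true;
}

static void my_watch_done(struct tevent_req *subreq)
{
        bool blockerdead = false;
        struct server_id blocker = { .pid = 0 };
        NTSTATUS status;

        status = g_lock_lock_cb_watch_data_recv(subreq, &blockerdead, &blocker);
        TALLOC_FREE(subreq);
        if (!NT_STATUS_IS_OK(status)) {
                return;
        }
        /* unique_data_epoch moved on, or the blocker died */
}
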
     571             : 
     572        2542 : void g_lock_lock_cb_wake_watchers(struct g_lock_lock_cb_state *cb_state)
     573             : {
     574        2542 :         struct g_lock *lck = cb_state->lck;
     575             : 
     576        2542 :         lck->unique_data_epoch = generate_unique_u64(lck->unique_data_epoch);
     577        2542 :         cb_state->modified = true;
     578        2542 : }
     579             : 
     580      948869 : static NTSTATUS g_lock_lock_cb_run_and_store(struct g_lock_lock_cb_state *cb_state)
     581             : {
     582      948869 :         struct g_lock *lck = cb_state->lck;
     583      948869 :         NTSTATUS success_status = NT_STATUS_OK;
     584        2314 :         NTSTATUS status;
     585             : 
     586      948869 :         if (cb_state->cb_fn != NULL) {
     587             : 
     588      937014 :                 SMB_ASSERT(lck->num_shared == 0);
     589      937014 :                 SMB_ASSERT(cb_state->new_shared == NULL);
     590             : 
     591      937014 :                 if (cb_state->ctx->lock_order != DBWRAP_LOCK_ORDER_NONE) {
     592      937014 :                         const char *name = dbwrap_name(cb_state->ctx->db);
     593      937014 :                         dbwrap_lock_order_lock(name, cb_state->ctx->lock_order);
     594             :                 }
     595             : 
     596      937014 :                 cb_state->ctx->busy = true;
     597      937014 :                 cb_state->cb_fn(cb_state, cb_state->cb_private);
     598      937014 :                 cb_state->ctx->busy = false;
     599             : 
     600      937014 :                 if (cb_state->ctx->lock_order != DBWRAP_LOCK_ORDER_NONE) {
     601      937014 :                         const char *name = dbwrap_name(cb_state->ctx->db);
     602      937014 :                         dbwrap_lock_order_unlock(name, cb_state->ctx->lock_order);
     603             :                 }
     604             :         }
     605             : 
     606      948869 :         if (cb_state->unlock) {
     607             :                 /*
      608             :                  * Unlocking should wake up watchers.
     609             :                  *
     610             :                  * We no longer need the lock, so
     611             :                  * force a wakeup of the next watchers,
     612             :                  * even if we don't do any update.
     613             :                  */
     614      457726 :                 dbwrap_watched_watch_reset_alerting(cb_state->rec);
     615      457726 :                 dbwrap_watched_watch_force_alerting(cb_state->rec);
     616      457726 :                 if (!cb_state->modified) {
     617             :                         /*
     618             :                          * The record was not changed at
     619             :                          * all, so we can also avoid
     620             :                          * storing the lck.unique_lock_epoch
     621             :                          * change
     622             :                          */
     623       13003 :                         return NT_STATUS_WAS_UNLOCKED;
     624             :                 }
     625      444723 :                 lck->exclusive = (struct server_id) { .pid = 0 };
     626      444723 :                 cb_state->new_shared = NULL;
     627             : 
     628      444723 :                 if (lck->datalen == 0) {
     629      243491 :                         if (!cb_state->existed) {
     630           0 :                                 return NT_STATUS_WAS_UNLOCKED;
     631             :                         }
     632             : 
     633      243491 :                         status = dbwrap_record_delete(cb_state->rec);
     634      243491 :                         if (!NT_STATUS_IS_OK(status)) {
     635           0 :                                 DBG_WARNING("dbwrap_record_delete() failed: %s\n",
     636             :                                     nt_errstr(status));
     637           0 :                                 return status;
     638             :                         }
     639      243491 :                         return NT_STATUS_WAS_UNLOCKED;
     640             :                 }
     641             : 
     642      200428 :                 success_status = NT_STATUS_WAS_UNLOCKED;
     643             :         }
     644             : 
     645      692375 :         status = g_lock_store(cb_state->rec,
     646             :                               cb_state->lck,
     647             :                               cb_state->new_shared,
     648             :                               NULL, 0);
     649      692375 :         if (!NT_STATUS_IS_OK(status)) {
     650           0 :                 DBG_WARNING("g_lock_store() failed: %s\n",
     651             :                             nt_errstr(status));
     652           0 :                 return status;
     653             :         }
     654             : 
     655      692375 :         return success_status;
     656             : }
     657             : 
     658             : struct g_lock_lock_state {
     659             :         struct tevent_context *ev;
     660             :         struct g_lock_ctx *ctx;
     661             :         TDB_DATA key;
     662             :         enum g_lock_type type;
     663             :         bool retry;
     664             :         g_lock_lock_cb_fn_t cb_fn;
     665             :         void *cb_private;
     666             : };
     667             : 
     668             : struct g_lock_lock_fn_state {
     669             :         struct g_lock_lock_state *req_state;
     670             :         struct server_id *dead_blocker;
     671             : 
     672             :         struct tevent_req *watch_req;
     673             :         uint64_t watch_instance;
     674             :         NTSTATUS status;
     675             : };
     676             : 
     677             : static int g_lock_lock_state_destructor(struct g_lock_lock_state *s);
     678             : 
     679         318 : static NTSTATUS g_lock_trylock(
     680             :         struct db_record *rec,
     681             :         struct g_lock_lock_fn_state *state,
     682             :         TDB_DATA data,
     683             :         struct server_id *blocker)
     684             : {
     685         318 :         struct g_lock_lock_state *req_state = state->req_state;
     686         318 :         struct server_id self = messaging_server_id(req_state->ctx->msg);
     687         318 :         enum g_lock_type type = req_state->type;
     688         318 :         bool retry = req_state->retry;
     689         318 :         struct g_lock lck = { .exclusive.pid = 0 };
     690         636 :         struct g_lock_lock_cb_state cb_state = {
     691         318 :                 .ctx = req_state->ctx,
     692             :                 .rec = rec,
     693             :                 .lck = &lck,
     694         318 :                 .cb_fn = req_state->cb_fn,
     695         318 :                 .cb_private = req_state->cb_private,
     696         318 :                 .existed = data.dsize != 0,
     697         318 :                 .update_mem_ctx = talloc_tos(),
     698             :         };
     699          14 :         struct server_id_buf tmp;
     700          14 :         NTSTATUS status;
     701          14 :         bool ok;
     702             : 
     703         318 :         ok = g_lock_parse(data.dptr, data.dsize, &lck);
     704         318 :         if (!ok) {
     705           0 :                 dbwrap_watched_watch_remove_instance(rec, state->watch_instance);
     706           0 :                 DBG_DEBUG("g_lock_parse failed\n");
     707           0 :                 return NT_STATUS_INTERNAL_DB_CORRUPTION;
     708             :         }
     709             : 
     710         318 :         g_lock_cleanup_dead(&lck, state->dead_blocker);
     711             : 
     712         318 :         lck.unique_lock_epoch = generate_unique_u64(lck.unique_lock_epoch);
     713             : 
     714         318 :         if (lck.exclusive.pid != 0) {
     715          21 :                 bool self_exclusive = server_id_equal(&self, &lck.exclusive);
     716             : 
     717          21 :                 if (!self_exclusive) {
     718          18 :                         bool exists = serverid_exists(&lck.exclusive);
     719          18 :                         if (!exists) {
     720           0 :                                 lck.exclusive = (struct server_id) { .pid=0 };
     721           0 :                                 goto noexclusive;
     722             :                         }
     723             : 
     724          18 :                         DBG_DEBUG("%s has an exclusive lock\n",
     725             :                                   server_id_str_buf(lck.exclusive, &tmp));
     726             : 
     727          18 :                         if (type == G_LOCK_DOWNGRADE) {
     728           0 :                                 struct server_id_buf tmp2;
     729             : 
     730           0 :                                 dbwrap_watched_watch_remove_instance(rec,
     731             :                                                 state->watch_instance);
     732             : 
     733           0 :                                 DBG_DEBUG("%s: Trying to downgrade %s\n",
     734             :                                           server_id_str_buf(self, &tmp),
     735             :                                           server_id_str_buf(
     736             :                                                   lck.exclusive, &tmp2));
     737           0 :                                 return NT_STATUS_NOT_LOCKED;
     738             :                         }
     739             : 
     740          18 :                         if (type == G_LOCK_UPGRADE) {
     741           1 :                                 ssize_t shared_idx;
     742             : 
     743           1 :                                 dbwrap_watched_watch_remove_instance(rec,
     744             :                                                 state->watch_instance);
     745             : 
     746           1 :                                 shared_idx = g_lock_find_shared(&lck, &self);
     747             : 
     748           1 :                                 if (shared_idx == -1) {
     749           0 :                                         DBG_DEBUG("Trying to upgrade %s "
     750             :                                                   "without "
     751             :                                                   "existing shared lock\n",
     752             :                                                   server_id_str_buf(
     753             :                                                           self, &tmp));
     754           0 :                                         return NT_STATUS_NOT_LOCKED;
     755             :                                 }
     756             : 
     757             :                                 /*
     758             :                                  * We're trying to upgrade, and the
     759             :                                  * exclusive lock is taken by someone
     760             :                                  * else. This means that someone else
     761             :                                  * is waiting for us to give up our
     762             :                                  * shared lock. If we now also wait
      763             :                                  * for someone to give up their shared
     764             :                                  * lock, we will deadlock.
     765             :                                  */
     766             : 
     767           1 :                                 DBG_DEBUG("Trying to upgrade %s while "
     768             :                                           "someone else is also "
     769             :                                           "trying to upgrade\n",
     770             :                                           server_id_str_buf(self, &tmp));
     771           1 :                                 return NT_STATUS_POSSIBLE_DEADLOCK;
     772             :                         }
     773             : 
     774          17 :                         DBG_DEBUG("Waiting for lck.exclusive=%s\n",
     775             :                                   server_id_str_buf(lck.exclusive, &tmp));
     776             : 
     777             :                         /*
     778             :                          * We will return NT_STATUS_LOCK_NOT_GRANTED
     779             :                          * and need to monitor the record.
     780             :                          *
     781             :                          * If we don't have a watcher instance yet,
     782             :                          * we should add one.
     783             :                          */
     784          17 :                         if (state->watch_instance == 0) {
     785          15 :                                 state->watch_instance =
     786          15 :                                         dbwrap_watched_watch_add_instance(rec);
     787             :                         }
     788             : 
     789          17 :                         *blocker = lck.exclusive;
     790          17 :                         return NT_STATUS_LOCK_NOT_GRANTED;
     791             :                 }
     792             : 
     793           3 :                 if (type == G_LOCK_DOWNGRADE) {
     794           0 :                         DBG_DEBUG("Downgrading %s from WRITE to READ\n",
     795             :                                   server_id_str_buf(self, &tmp));
     796             : 
     797           0 :                         lck.exclusive = (struct server_id) { .pid = 0 };
     798           0 :                         goto do_shared;
     799             :                 }
     800             : 
     801           3 :                 if (!retry) {
     802           1 :                         dbwrap_watched_watch_remove_instance(rec,
     803             :                                                 state->watch_instance);
     804             : 
     805           1 :                         DBG_DEBUG("%s already locked by self\n",
     806             :                                   server_id_str_buf(self, &tmp));
     807           1 :                         return NT_STATUS_WAS_LOCKED;
     808             :                 }
     809             : 
     810           2 :                 g_lock_cleanup_shared(&lck);
     811             : 
     812           2 :                 if (lck.num_shared != 0) {
     813           0 :                         g_lock_get_shared(&lck, 0, blocker);
     814             : 
     815           0 :                         DBG_DEBUG("Continue waiting for shared lock %s\n",
     816             :                                   server_id_str_buf(*blocker, &tmp));
     817             : 
     818             :                         /*
     819             :                          * We will return NT_STATUS_LOCK_NOT_GRANTED
     820             :                          * and need to monitor the record.
     821             :                          *
     822             :                          * If we don't have a watcher instance yet,
     823             :                          * we should add one.
     824             :                          */
     825           0 :                         if (state->watch_instance == 0) {
     826           0 :                                 state->watch_instance =
     827           0 :                                         dbwrap_watched_watch_add_instance(rec);
     828             :                         }
     829             : 
     830           0 :                         return NT_STATUS_LOCK_NOT_GRANTED;
     831             :                 }
     832             : 
     833             :                 /*
      834             :                  * Retry after a conflicting lock was released.
      835             :                  * All pending readers are gone, so we got the lock.
     836             :                  */
     837           2 :                 goto got_lock;
     838             :         }
     839             : 
     840         297 : noexclusive:
     841             : 
     842         297 :         if (type == G_LOCK_UPGRADE) {
     843           3 :                 ssize_t shared_idx = g_lock_find_shared(&lck, &self);
     844             : 
     845           3 :                 if (shared_idx == -1) {
     846           0 :                         dbwrap_watched_watch_remove_instance(rec,
     847             :                                                 state->watch_instance);
     848             : 
     849           0 :                         DBG_DEBUG("Trying to upgrade %s without "
     850             :                                   "existing shared lock\n",
     851             :                                   server_id_str_buf(self, &tmp));
     852           0 :                         return NT_STATUS_NOT_LOCKED;
     853             :                 }
     854             : 
     855           3 :                 g_lock_del_shared(&lck, shared_idx);
     856           3 :                 type = G_LOCK_WRITE;
     857             :         }
     858             : 
     859         297 :         if (type == G_LOCK_WRITE) {
     860         297 :                 ssize_t shared_idx = g_lock_find_shared(&lck, &self);
     861             : 
     862         297 :                 if (shared_idx != -1) {
     863           0 :                         dbwrap_watched_watch_remove_instance(rec,
     864             :                                                 state->watch_instance);
     865           0 :                         DBG_DEBUG("Trying to writelock existing shared %s\n",
     866             :                                   server_id_str_buf(self, &tmp));
     867           0 :                         return NT_STATUS_WAS_LOCKED;
     868             :                 }
     869             : 
     870         297 :                 lck.exclusive = self;
     871             : 
     872         297 :                 g_lock_cleanup_shared(&lck);
     873             : 
     874         297 :                 if (lck.num_shared == 0) {
     875             :                         /*
     876             :                          * If we store ourself as exclusive writer,
      877             :                          * We can store ourselves as exclusive writer
      878             :                          * without any pending readers.
     879         292 :                         goto got_lock;
     880             :                 }
     881             : 
     882           5 :                 if (state->watch_instance == 0) {
     883             :                         /*
     884             :                          * Here we have lck.num_shared != 0.
     885             :                          *
     886             :                          * We will return NT_STATUS_LOCK_NOT_GRANTED
     887             :                          * below.
     888             :                          *
      889             :                          * And we don't have a watcher instance yet!
     890             :                          *
     891             :                          * We add it here before g_lock_store()
     892             :                          * in order to trigger just one
     893             :                          * low level dbwrap_do_locked() call.
     894             :                          */
     895           5 :                         state->watch_instance =
     896           5 :                                 dbwrap_watched_watch_add_instance(rec);
     897             :                 }
     898             : 
     899           5 :                 status = g_lock_store(rec, &lck, NULL, NULL, 0);
     900           5 :                 if (!NT_STATUS_IS_OK(status)) {
     901           0 :                         DBG_DEBUG("g_lock_store() failed: %s\n",
     902             :                                   nt_errstr(status));
     903           0 :                         return status;
     904             :                 }
     905             : 
     906           5 :                 talloc_set_destructor(
     907             :                         req_state, g_lock_lock_state_destructor);
     908             : 
     909           5 :                 g_lock_get_shared(&lck, 0, blocker);
     910             : 
     911           5 :                 DBG_DEBUG("Waiting for %zu shared locks, "
     912             :                           "picking blocker %s\n",
     913             :                           lck.num_shared,
     914             :                           server_id_str_buf(*blocker, &tmp));
     915             : 
     916           5 :                 return NT_STATUS_LOCK_NOT_GRANTED;
     917             :         }
     918             : 
     919           0 : do_shared:
     920             : 
     921           0 :         g_lock_cleanup_shared(&lck);
     922           0 :         cb_state.new_shared = &self;
     923           0 :         goto got_lock;
     924             : 
     925         294 : got_lock:
     926             :         /*
     927             :          * We got the lock we asked for, so we no
     928             :          * longer need to monitor the record.
     929             :          */
     930         294 :         dbwrap_watched_watch_remove_instance(rec, state->watch_instance);
     931             : 
     932         294 :         status = g_lock_lock_cb_run_and_store(&cb_state);
     933         294 :         if (!NT_STATUS_IS_OK(status) &&
     934           7 :             !NT_STATUS_EQUAL(status, NT_STATUS_WAS_UNLOCKED))
     935             :         {
     936           0 :                 DBG_WARNING("g_lock_lock_cb_run_and_store() failed: %s\n",
     937             :                             nt_errstr(status));
     938           0 :                 return status;
     939             :         }
     940             : 
     941         294 :         talloc_set_destructor(req_state, NULL);
     942         294 :         return status;
     943             : }
     944             : 
     945         318 : static void g_lock_lock_fn(
     946             :         struct db_record *rec,
     947             :         TDB_DATA value,
     948             :         void *private_data)
     949             : {
     950         318 :         struct g_lock_lock_fn_state *state = private_data;
     951         318 :         struct server_id blocker = {0};
     952             : 
     953             :         /*
      954             :          * We're trying to get a lock. If we are
      955             :          * successful in doing that, we should not
      956             :          * wake up any other waiters; all they would
      957             :          * find is that we're holding a lock they
      958             :          * are conflicting with.
     959             :          */
     960         318 :         dbwrap_watched_watch_skip_alerting(rec);
     961             : 
     962         318 :         state->status = g_lock_trylock(rec, state, value, &blocker);
     963         318 :         if (!NT_STATUS_IS_OK(state->status)) {
     964          31 :                 DBG_DEBUG("g_lock_trylock returned %s\n",
     965             :                           nt_errstr(state->status));
     966             :         }
     967         318 :         if (!NT_STATUS_EQUAL(state->status, NT_STATUS_LOCK_NOT_GRANTED)) {
     968         296 :                 return;
     969             :         }
     970             : 
     971          44 :         state->watch_req = dbwrap_watched_watch_send(
     972          22 :                 state->req_state, state->req_state->ev, rec, state->watch_instance, blocker);
     973          22 :         if (state->watch_req == NULL) {
     974           0 :                 state->status = NT_STATUS_NO_MEMORY;
     975             :         }
     976             : }
     977             : 
     978           2 : static int g_lock_lock_state_destructor(struct g_lock_lock_state *s)
     979             : {
     980           2 :         NTSTATUS status = g_lock_unlock(s->ctx, s->key);
     981           2 :         if (!NT_STATUS_IS_OK(status)) {
     982           0 :                 DBG_DEBUG("g_lock_unlock failed: %s\n", nt_errstr(status));
     983             :         }
     984           2 :         return 0;
     985             : }
     986             : 
     987             : static void g_lock_lock_retry(struct tevent_req *subreq);
     988             : 
     989         301 : struct tevent_req *g_lock_lock_send(TALLOC_CTX *mem_ctx,
     990             :                                     struct tevent_context *ev,
     991             :                                     struct g_lock_ctx *ctx,
     992             :                                     TDB_DATA key,
     993             :                                     enum g_lock_type type,
     994             :                                     g_lock_lock_cb_fn_t cb_fn,
     995             :                                     void *cb_private)
     996             : {
     997          11 :         struct tevent_req *req;
     998          11 :         struct g_lock_lock_state *state;
     999          11 :         struct g_lock_lock_fn_state fn_state;
    1000          11 :         NTSTATUS status;
    1001          11 :         bool ok;
    1002             : 
    1003         301 :         SMB_ASSERT(!ctx->busy);
    1004             : 
    1005         301 :         req = tevent_req_create(mem_ctx, &state, struct g_lock_lock_state);
    1006         301 :         if (req == NULL) {
    1007           0 :                 return NULL;
    1008             :         }
    1009         301 :         state->ev = ev;
    1010         301 :         state->ctx = ctx;
    1011         301 :         state->key = key;
    1012         301 :         state->type = type;
    1013         301 :         state->cb_fn = cb_fn;
    1014         301 :         state->cb_private = cb_private;
    1015             : 
    1016         301 :         fn_state = (struct g_lock_lock_fn_state) {
    1017             :                 .req_state = state,
    1018             :         };
    1019             : 
    1020             :         /*
     1021             :          * We allow a cb_fn only for G_LOCK_WRITE for now.
    1022             :          *
    1023             :          * It's all we currently need and it makes a few things
    1024             :          * easier to implement.
    1025             :          */
    1026         301 :         if (unlikely(cb_fn != NULL && type != G_LOCK_WRITE)) {
    1027           0 :                 tevent_req_nterror(req, NT_STATUS_INVALID_PARAMETER_6);
    1028           0 :                 return tevent_req_post(req, ev);
    1029             :         }
    1030             : 
    1031         301 :         status = dbwrap_do_locked(ctx->db, key, g_lock_lock_fn, &fn_state);
    1032         301 :         if (tevent_req_nterror(req, status)) {
    1033           0 :                 DBG_DEBUG("dbwrap_do_locked failed: %s\n",
    1034             :                           nt_errstr(status));
    1035           0 :                 return tevent_req_post(req, ev);
    1036             :         }
    1037             : 
    1038         301 :         if (NT_STATUS_IS_OK(fn_state.status)) {
    1039         279 :                 tevent_req_done(req);
    1040         279 :                 return tevent_req_post(req, ev);
    1041             :         }
    1042          22 :         if (!NT_STATUS_EQUAL(fn_state.status, NT_STATUS_LOCK_NOT_GRANTED)) {
    1043           2 :                 tevent_req_nterror(req, fn_state.status);
    1044           2 :                 return tevent_req_post(req, ev);
    1045             :         }
    1046             : 
    1047          20 :         if (tevent_req_nomem(fn_state.watch_req, req)) {
    1048           0 :                 return tevent_req_post(req, ev);
    1049             :         }
    1050             : 
    1051          56 :         ok = tevent_req_set_endtime(
    1052             :                 fn_state.watch_req,
    1053          20 :                 state->ev,
    1054          20 :                 timeval_current_ofs(5 + generate_random() % 5, 0));
    1055          20 :         if (!ok) {
    1056           0 :                 tevent_req_oom(req);
    1057           0 :                 return tevent_req_post(req, ev);
    1058             :         }
    1059          20 :         tevent_req_set_callback(fn_state.watch_req, g_lock_lock_retry, req);
    1060             : 
    1061          20 :         return req;
    1062             : }
    1063             : 
    1064          17 : static void g_lock_lock_retry(struct tevent_req *subreq)
    1065             : {
    1066          17 :         struct tevent_req *req = tevent_req_callback_data(
    1067             :                 subreq, struct tevent_req);
    1068          17 :         struct g_lock_lock_state *state = tevent_req_data(
    1069             :                 req, struct g_lock_lock_state);
    1070           3 :         struct g_lock_lock_fn_state fn_state;
    1071          17 :         struct server_id blocker = { .pid = 0 };
    1072          17 :         bool blockerdead = false;
    1073           3 :         NTSTATUS status;
    1074          17 :         uint64_t instance = 0;
    1075             : 
    1076          17 :         status = dbwrap_watched_watch_recv(subreq, &instance, &blockerdead, &blocker);
    1077          17 :         DBG_DEBUG("watch_recv returned %s\n", nt_errstr(status));
    1078          17 :         TALLOC_FREE(subreq);
    1079             : 
    1080          17 :         if (!NT_STATUS_IS_OK(status) &&
    1081           0 :             !NT_STATUS_EQUAL(status, NT_STATUS_IO_TIMEOUT)) {
    1082           0 :                 tevent_req_nterror(req, status);
    1083           0 :                 return;
    1084             :         }
    1085             : 
    1086          17 :         state->retry = true;
    1087             : 
    1088          20 :         fn_state = (struct g_lock_lock_fn_state) {
    1089             :                 .req_state = state,
    1090          17 :                 .dead_blocker = blockerdead ? &blocker : NULL,
    1091             :                 .watch_instance = instance,
    1092             :         };
    1093             : 
    1094          17 :         status = dbwrap_do_locked(state->ctx->db, state->key,
    1095             :                                   g_lock_lock_fn, &fn_state);
    1096          17 :         if (tevent_req_nterror(req, status)) {
    1097           0 :                 DBG_DEBUG("dbwrap_do_locked failed: %s\n",
    1098             :                           nt_errstr(status));
    1099           0 :                 return;
    1100             :         }
    1101             : 
    1102          17 :         if (NT_STATUS_IS_OK(fn_state.status)) {
    1103           8 :                 tevent_req_done(req);
    1104           8 :                 return;
    1105             :         }
    1106           9 :         if (!NT_STATUS_EQUAL(fn_state.status, NT_STATUS_LOCK_NOT_GRANTED)) {
    1107           7 :                 tevent_req_nterror(req, fn_state.status);
    1108           7 :                 return;
    1109             :         }
    1110             : 
    1111           2 :         if (tevent_req_nomem(fn_state.watch_req, req)) {
    1112           0 :                 return;
    1113             :         }
    1114             : 
    1115           2 :         if (!tevent_req_set_endtime(
    1116             :                     fn_state.watch_req, state->ev,
    1117           2 :                     timeval_current_ofs(5 + generate_random() % 5, 0))) {
    1118           0 :                 return;
    1119             :         }
    1120           2 :         tevent_req_set_callback(fn_state.watch_req, g_lock_lock_retry, req);
    1121             : }
    1122             : 
    1123         300 : NTSTATUS g_lock_lock_recv(struct tevent_req *req)
    1124             : {
    1125         300 :         struct g_lock_lock_state *state = tevent_req_data(
    1126             :                 req, struct g_lock_lock_state);
    1127         300 :         struct g_lock_ctx *ctx = state->ctx;
    1128          10 :         NTSTATUS status;
    1129             : 
    1130         300 :         if (tevent_req_is_nterror(req, &status)) {
    1131          13 :                 if (NT_STATUS_EQUAL(status, NT_STATUS_WAS_UNLOCKED)) {
    1132           7 :                         return NT_STATUS_OK;
    1133             :                 }
    1134           6 :                 return status;
    1135             :         }
    1136             : 
    1137         287 :         if ((ctx->lock_order != DBWRAP_LOCK_ORDER_NONE) &&
    1138           6 :             ((state->type == G_LOCK_READ) ||
    1139           6 :              (state->type == G_LOCK_WRITE))) {
    1140           6 :                 const char *name = dbwrap_name(ctx->db);
    1141           6 :                 dbwrap_lock_order_lock(name, ctx->lock_order);
    1142             :         }
    1143             : 
    1144         287 :         return NT_STATUS_OK;
    1145             : }
    1146             : 
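/*
 * Editor's sketch (not part of the measured g_lock.c): how a caller might
 * drive the async pair g_lock_lock_send()/g_lock_lock_recv() above.  The
 * names example_lock_start()/example_lock_done() and the key "example_key"
 * are hypothetical; string_term_tdb_data() is assumed to be available via
 * util_tdb.h for turning a C string into a TDB_DATA.
 */
static void example_lock_done(struct tevent_req *subreq);

static void example_lock_start(struct tevent_context *ev,
                               struct g_lock_ctx *ctx)
{
        TDB_DATA key = string_term_tdb_data("example_key");
        struct tevent_req *subreq = NULL;

        /* Ask for the exclusive lock; completion arrives via the callback. */
        subreq = g_lock_lock_send(ev, ev, ctx, key, G_LOCK_WRITE, NULL, NULL);
        if (subreq == NULL) {
                return;         /* out of memory in real code */
        }
        tevent_req_set_callback(subreq, example_lock_done, NULL);
}

static void example_lock_done(struct tevent_req *subreq)
{
        NTSTATUS status = g_lock_lock_recv(subreq);
        TALLOC_FREE(subreq);
        DBG_DEBUG("g_lock_lock_recv returned %s\n", nt_errstr(status));
        /* On NT_STATUS_OK the caller now holds the G_LOCK_WRITE lock. */
}
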
    1147             : struct g_lock_lock_simple_state {
    1148             :         struct g_lock_ctx *ctx;
    1149             :         struct server_id me;
    1150             :         enum g_lock_type type;
    1151             :         NTSTATUS status;
    1152             :         g_lock_lock_cb_fn_t cb_fn;
    1153             :         void *cb_private;
    1154             : };
    1155             : 
    1156      948593 : static void g_lock_lock_simple_fn(
    1157             :         struct db_record *rec,
    1158             :         TDB_DATA value,
    1159             :         void *private_data)
    1160             : {
    1161      948593 :         struct g_lock_lock_simple_state *state = private_data;
    1162        2315 :         struct server_id_buf buf;
    1163      948593 :         struct g_lock lck = { .exclusive.pid = 0 };
    1164     1897186 :         struct g_lock_lock_cb_state cb_state = {
    1165      948593 :                 .ctx = state->ctx,
    1166             :                 .rec = rec,
    1167             :                 .lck = &lck,
    1168      948593 :                 .cb_fn = state->cb_fn,
    1169      948593 :                 .cb_private = state->cb_private,
    1170      948593 :                 .existed = value.dsize != 0,
    1171      948593 :                 .update_mem_ctx = talloc_tos(),
    1172             :         };
    1173        2315 :         bool ok;
    1174             : 
    1175      948593 :         ok = g_lock_parse(value.dptr, value.dsize, &lck);
    1176      948593 :         if (!ok) {
    1177           0 :                 DBG_DEBUG("g_lock_parse failed\n");
    1178           0 :                 state->status = NT_STATUS_INTERNAL_DB_CORRUPTION;
    1179           0 :                 return;
    1180             :         }
    1181             : 
    1182      948593 :         if (lck.exclusive.pid != 0) {
    1183          16 :                 DBG_DEBUG("locked by %s\n",
    1184             :                           server_id_str_buf(lck.exclusive, &buf));
    1185          16 :                 goto not_granted;
    1186             :         }
    1187             : 
    1188      948577 :         if (state->type == G_LOCK_WRITE) {
    1189      948559 :                 if (lck.num_shared != 0) {
    1190           2 :                         DBG_DEBUG("num_shared=%zu\n", lck.num_shared);
    1191           2 :                         goto not_granted;
    1192             :                 }
    1193      948557 :                 lck.exclusive = state->me;
    1194          18 :         } else if (state->type == G_LOCK_READ) {
    1195          18 :                 g_lock_cleanup_shared(&lck);
    1196          18 :                 cb_state.new_shared = &state->me;
    1197             :         } else {
    1198           0 :                 smb_panic(__location__);
    1199             :         }
    1200             : 
    1201      948575 :         lck.unique_lock_epoch = generate_unique_u64(lck.unique_lock_epoch);
    1202             : 
    1203             :         /*
     1204             :          * We are going to store ourselves as owner,
    1205             :          * so we got what we were waiting for.
    1206             :          *
    1207             :          * So we no longer need to monitor the
    1208             :          * record.
    1209             :          */
    1210      948575 :         dbwrap_watched_watch_skip_alerting(rec);
    1211             : 
    1212      948575 :         state->status = g_lock_lock_cb_run_and_store(&cb_state);
    1213      948575 :         if (!NT_STATUS_IS_OK(state->status) &&
    1214      456250 :             !NT_STATUS_EQUAL(state->status, NT_STATUS_WAS_UNLOCKED))
    1215             :         {
    1216           0 :                 DBG_WARNING("g_lock_lock_cb_run_and_store() failed: %s\n",
    1217             :                             nt_errstr(state->status));
    1218           0 :                 return;
    1219             :         }
    1220             : 
    1221      946265 :         return;
    1222             : 
    1223          18 : not_granted:
    1224          18 :         state->status = NT_STATUS_LOCK_NOT_GRANTED;
    1225             : }
    1226             : 
    1227      948596 : NTSTATUS g_lock_lock(struct g_lock_ctx *ctx, TDB_DATA key,
    1228             :                      enum g_lock_type type, struct timeval timeout,
    1229             :                      g_lock_lock_cb_fn_t cb_fn,
    1230             :                      void *cb_private)
    1231             : {
    1232        2318 :         TALLOC_CTX *frame;
    1233        2318 :         struct tevent_context *ev;
    1234        2318 :         struct tevent_req *req;
    1235        2318 :         struct timeval end;
    1236        2318 :         NTSTATUS status;
    1237             : 
    1238      948596 :         SMB_ASSERT(!ctx->busy);
    1239             : 
    1240             :         /*
     1241             :          * We allow a cb_fn only for G_LOCK_WRITE for now.
    1242             :          *
    1243             :          * It's all we currently need and it makes a few things
    1244             :          * easier to implement.
    1245             :          */
    1246      948596 :         if (unlikely(cb_fn != NULL && type != G_LOCK_WRITE)) {
    1247           0 :                 return NT_STATUS_INVALID_PARAMETER_5;
    1248             :         }
    1249             : 
    1250      948596 :         if ((type == G_LOCK_READ) || (type == G_LOCK_WRITE)) {
    1251             :                 /*
    1252             :                  * This is an abstraction violation: Normally we do
    1253             :                  * the sync wrappers around async functions with full
    1254             :                  * nested event contexts. However, this is used in
    1255             :                  * very hot code paths, so avoid the event context
    1256             :                  * creation for the good path where there's no lock
    1257             :                  * contention. My benchmark gave a factor of 2
    1258             :                  * improvement for lock/unlock.
    1259             :                  */
    1260     1897186 :                 struct g_lock_lock_simple_state state = {
    1261             :                         .ctx = ctx,
    1262      948593 :                         .me = messaging_server_id(ctx->msg),
    1263             :                         .type = type,
    1264             :                         .cb_fn = cb_fn,
    1265             :                         .cb_private = cb_private,
    1266             :                 };
    1267      948593 :                 status = dbwrap_do_locked(
    1268             :                         ctx->db, key, g_lock_lock_simple_fn, &state);
    1269      948593 :                 if (!NT_STATUS_IS_OK(status)) {
    1270           0 :                         DBG_DEBUG("dbwrap_do_locked() failed: %s\n",
    1271             :                                   nt_errstr(status));
    1272      948575 :                         return status;
    1273             :                 }
    1274             : 
    1275      948593 :                 DBG_DEBUG("status=%s, state.status=%s\n",
    1276             :                           nt_errstr(status),
    1277             :                           nt_errstr(state.status));
    1278             : 
    1279      948593 :                 if (NT_STATUS_IS_OK(state.status)) {
    1280      490856 :                         if (ctx->lock_order != DBWRAP_LOCK_ORDER_NONE) {
    1281      490827 :                                 const char *name = dbwrap_name(ctx->db);
    1282      490827 :                                 dbwrap_lock_order_lock(name, ctx->lock_order);
    1283             :                         }
    1284      490856 :                         return NT_STATUS_OK;
    1285             :                 }
    1286      457737 :                 if (NT_STATUS_EQUAL(state.status, NT_STATUS_WAS_UNLOCKED)) {
    1287             :                         /* without dbwrap_lock_order_lock() */
    1288      457719 :                         return NT_STATUS_OK;
    1289             :                 }
    1290          18 :                 if (!NT_STATUS_EQUAL(
    1291             :                             state.status, NT_STATUS_LOCK_NOT_GRANTED)) {
    1292           0 :                         return state.status;
    1293             :                 }
    1294             : 
    1295          18 :                 if (timeval_is_zero(&timeout)) {
    1296           0 :                         return NT_STATUS_LOCK_NOT_GRANTED;
    1297             :                 }
    1298             : 
    1299             :                 /*
     1300             :                  * Fall back to the full g_lock_trylock logic;
    1301             :                  * g_lock_lock_simple_fn() called above only covers
    1302             :                  * the uncontended path.
    1303             :                  */
    1304             :         }
    1305             : 
    1306          21 :         frame = talloc_stackframe();
    1307          21 :         status = NT_STATUS_NO_MEMORY;
    1308             : 
    1309          21 :         ev = samba_tevent_context_init(frame);
    1310          21 :         if (ev == NULL) {
    1311           0 :                 goto fail;
    1312             :         }
    1313          21 :         req = g_lock_lock_send(frame, ev, ctx, key, type, cb_fn, cb_private);
    1314          21 :         if (req == NULL) {
    1315           0 :                 goto fail;
    1316             :         }
    1317          21 :         end = timeval_current_ofs(timeout.tv_sec, timeout.tv_usec);
    1318          21 :         if (!tevent_req_set_endtime(req, ev, end)) {
    1319           0 :                 goto fail;
    1320             :         }
    1321          21 :         if (!tevent_req_poll_ntstatus(req, ev, &status)) {
    1322           0 :                 goto fail;
    1323             :         }
    1324          21 :         status = g_lock_lock_recv(req);
    1325          21 :  fail:
    1326          21 :         TALLOC_FREE(frame);
    1327          21 :         return status;
    1328             : }
    1329             : 
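/*
 * Editor's sketch (not part of the measured g_lock.c): the synchronous
 * fast path above, seen from a caller.  The key string and the 10-second
 * timeout are made up for illustration; timeval_set() is assumed to come
 * from the Samba time utilities.
 */
static NTSTATUS example_update_under_lock(struct g_lock_ctx *ctx)
{
        TDB_DATA key = string_term_tdb_data("example_key");
        NTSTATUS status;

        /* Wait up to 10 seconds for the exclusive lock. */
        status = g_lock_lock(ctx, key, G_LOCK_WRITE,
                             timeval_set(10, 0), NULL, NULL);
        if (!NT_STATUS_IS_OK(status)) {
                return status;
        }

        /* ... touch whatever this key protects ... */

        return g_lock_unlock(ctx, key);
}
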
    1330             : struct g_lock_unlock_state {
    1331             :         struct server_id self;
    1332             :         NTSTATUS status;
    1333             : };
    1334             : 
    1335      491129 : static void g_lock_unlock_fn(
    1336             :         struct db_record *rec,
    1337             :         TDB_DATA value,
    1338             :         void *private_data)
    1339             : {
    1340      491129 :         struct g_lock_unlock_state *state = private_data;
    1341         831 :         struct server_id_buf tmp1, tmp2;
    1342         831 :         struct g_lock lck;
    1343         831 :         size_t i;
    1344         831 :         bool ok, exclusive;
    1345             : 
    1346      491129 :         ok = g_lock_parse(value.dptr, value.dsize, &lck);
    1347      491129 :         if (!ok) {
    1348           0 :                 DBG_DEBUG("g_lock_parse() failed\n");
    1349           0 :                 state->status = NT_STATUS_INTERNAL_DB_CORRUPTION;
    1350           0 :                 return;
    1351             :         }
    1352             : 
    1353      491129 :         exclusive = server_id_equal(&state->self, &lck.exclusive);
    1354             : 
    1355      491974 :         for (i=0; i<lck.num_shared; i++) {
    1356          20 :                 struct server_id shared;
    1357          20 :                 g_lock_get_shared(&lck, i, &shared);
    1358          20 :                 if (server_id_equal(&state->self, &shared)) {
    1359           0 :                         break;
    1360             :                 }
    1361             :         }
    1362             : 
    1363      491129 :         if (i < lck.num_shared) {
    1364           6 :                 if (exclusive) {
    1365           0 :                         DBG_DEBUG("%s both exclusive and shared (%zu)\n",
    1366             :                                   server_id_str_buf(state->self, &tmp1),
    1367             :                                   i);
    1368           0 :                         state->status = NT_STATUS_INTERNAL_DB_CORRUPTION;
    1369           0 :                         return;
    1370             :                 }
    1371           6 :                 g_lock_del_shared(&lck, i);
    1372             :         } else {
    1373      491123 :                 if (!exclusive) {
    1374           1 :                         DBG_DEBUG("Lock not found, self=%s, lck.exclusive=%s, "
    1375             :                                   "num_shared=%zu\n",
    1376             :                                   server_id_str_buf(state->self, &tmp1),
    1377             :                                   server_id_str_buf(lck.exclusive, &tmp2),
    1378             :                                   lck.num_shared);
    1379           1 :                         state->status = NT_STATUS_NOT_FOUND;
    1380           1 :                         return;
    1381             :                 }
    1382      491122 :                 lck.exclusive = (struct server_id) { .pid = 0 };
    1383             :         }
    1384             : 
    1385      491128 :         if ((lck.exclusive.pid == 0) &&
    1386      491128 :             (lck.num_shared == 0) &&
    1387      491121 :             (lck.datalen == 0)) {
    1388      170682 :                 state->status = dbwrap_record_delete(rec);
    1389      170682 :                 return;
    1390             :         }
    1391             : 
    1392      320446 :         if (!exclusive && lck.exclusive.pid != 0) {
    1393             :                 /*
    1394             :                  * We only had a read lock and there's
    1395             :                  * someone waiting for an exclusive lock.
    1396             :                  *
    1397             :                  * Don't alert the exclusive lock waiter
    1398             :                  * if there are still other read lock holders.
    1399             :                  */
    1400           0 :                 g_lock_cleanup_shared(&lck);
    1401           0 :                 if (lck.num_shared != 0) {
    1402           0 :                         dbwrap_watched_watch_skip_alerting(rec);
    1403             :                 }
    1404             :         }
    1405             : 
    1406      320446 :         lck.unique_lock_epoch = generate_unique_u64(lck.unique_lock_epoch);
    1407             : 
    1408      320446 :         state->status = g_lock_store(rec, &lck, NULL, NULL, 0);
    1409             : }
    1410             : 
    1411      491129 : NTSTATUS g_lock_unlock(struct g_lock_ctx *ctx, TDB_DATA key)
    1412             : {
    1413      491129 :         struct g_lock_unlock_state state = {
    1414      491129 :                 .self = messaging_server_id(ctx->msg),
    1415             :         };
    1416         831 :         NTSTATUS status;
    1417             : 
    1418      491129 :         SMB_ASSERT(!ctx->busy);
    1419             : 
    1420      491129 :         status = dbwrap_do_locked(ctx->db, key, g_lock_unlock_fn, &state);
    1421      491129 :         if (!NT_STATUS_IS_OK(status)) {
    1422           0 :                 DBG_WARNING("dbwrap_do_locked failed: %s\n",
    1423             :                             nt_errstr(status));
    1424           0 :                 return status;
    1425             :         }
    1426      491129 :         if (!NT_STATUS_IS_OK(state.status)) {
    1427           1 :                 DBG_WARNING("g_lock_unlock_fn failed: %s\n",
    1428             :                             nt_errstr(state.status));
    1429           1 :                 return state.status;
    1430             :         }
    1431             : 
    1432      491128 :         if (ctx->lock_order != DBWRAP_LOCK_ORDER_NONE) {
    1433      490833 :                 const char *name = dbwrap_name(ctx->db);
    1434      490833 :                 dbwrap_lock_order_unlock(name, ctx->lock_order);
    1435             :         }
    1436             : 
    1437      491128 :         return NT_STATUS_OK;
    1438             : }
    1439             : 
    1440             : struct g_lock_writev_data_state {
    1441             :         TDB_DATA key;
    1442             :         struct server_id self;
    1443             :         const TDB_DATA *dbufs;
    1444             :         size_t num_dbufs;
    1445             :         NTSTATUS status;
    1446             : };
    1447             : 
    1448      172281 : static void g_lock_writev_data_fn(
    1449             :         struct db_record *rec,
    1450             :         TDB_DATA value,
    1451             :         void *private_data)
    1452             : {
    1453      172281 :         struct g_lock_writev_data_state *state = private_data;
    1454         356 :         struct g_lock lck;
    1455         356 :         bool exclusive;
    1456         356 :         bool ok;
    1457             : 
    1458             :         /*
    1459             :          * We're holding an exclusive write lock.
    1460             :          *
    1461             :          * Now we're updating the content of the record.
    1462             :          *
     1463             :          * We should not wake up any other waiters; all they
    1464             :          * would find is that we're still holding a lock they
    1465             :          * are conflicting with.
    1466             :          */
    1467      172281 :         dbwrap_watched_watch_skip_alerting(rec);
    1468             : 
    1469      172281 :         ok = g_lock_parse(value.dptr, value.dsize, &lck);
    1470      172281 :         if (!ok) {
    1471           0 :                 DBG_DEBUG("g_lock_parse for %s failed\n",
    1472             :                           tdb_data_dbg(state->key));
    1473           0 :                 state->status = NT_STATUS_INTERNAL_DB_CORRUPTION;
    1474           0 :                 return;
    1475             :         }
    1476             : 
    1477      172281 :         exclusive = server_id_equal(&state->self, &lck.exclusive);
    1478             : 
    1479             :         /*
    1480             :          * Make sure we're really exclusive. We are marked as
     1481             :          * exclusive when we are waiting for an exclusive lock.
    1482             :          */
    1483      172281 :         exclusive &= (lck.num_shared == 0);
    1484             : 
    1485      172281 :         if (!exclusive) {
    1486           1 :                 struct server_id_buf buf1, buf2;
    1487           1 :                 DBG_DEBUG("Not locked by us: self=%s, lck.exclusive=%s, "
    1488             :                           "lck.num_shared=%zu\n",
    1489             :                           server_id_str_buf(state->self, &buf1),
    1490             :                           server_id_str_buf(lck.exclusive, &buf2),
    1491             :                           lck.num_shared);
    1492           1 :                 state->status = NT_STATUS_NOT_LOCKED;
    1493           1 :                 return;
    1494             :         }
    1495             : 
    1496      172280 :         lck.unique_data_epoch = generate_unique_u64(lck.unique_data_epoch);
    1497      172280 :         lck.data = NULL;
    1498      172280 :         lck.datalen = 0;
    1499      172280 :         state->status = g_lock_store(
    1500             :                 rec, &lck, NULL, state->dbufs, state->num_dbufs);
    1501             : }
    1502             : 
    1503      172281 : NTSTATUS g_lock_writev_data(
    1504             :         struct g_lock_ctx *ctx,
    1505             :         TDB_DATA key,
    1506             :         const TDB_DATA *dbufs,
    1507             :         size_t num_dbufs)
    1508             : {
    1509      344562 :         struct g_lock_writev_data_state state = {
    1510             :                 .key = key,
    1511      172281 :                 .self = messaging_server_id(ctx->msg),
    1512             :                 .dbufs = dbufs,
    1513             :                 .num_dbufs = num_dbufs,
    1514             :         };
    1515         356 :         NTSTATUS status;
    1516             : 
    1517      172281 :         SMB_ASSERT(!ctx->busy);
    1518             : 
    1519      172281 :         status = dbwrap_do_locked(
    1520             :                 ctx->db, key, g_lock_writev_data_fn, &state);
    1521      172281 :         if (!NT_STATUS_IS_OK(status)) {
    1522           0 :                 DBG_WARNING("dbwrap_do_locked failed: %s\n",
    1523             :                             nt_errstr(status));
    1524           0 :                 return status;
    1525             :         }
    1526      172281 :         if (!NT_STATUS_IS_OK(state.status)) {
    1527           1 :                 DBG_WARNING("g_lock_writev_data_fn failed: %s\n",
    1528             :                             nt_errstr(state.status));
    1529           1 :                 return state.status;
    1530             :         }
    1531             : 
    1532      172280 :         return NT_STATUS_OK;
    1533             : }
    1534             : 
    1535           4 : NTSTATUS g_lock_write_data(struct g_lock_ctx *ctx, TDB_DATA key,
    1536             :                            const uint8_t *buf, size_t buflen)
    1537             : {
    1538           4 :         TDB_DATA dbuf = {
    1539             :                 .dptr = discard_const_p(uint8_t, buf),
    1540             :                 .dsize = buflen,
    1541             :         };
    1542           4 :         return g_lock_writev_data(ctx, key, &dbuf, 1);
    1543             : }
    1544             : 
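/*
 * Editor's sketch (not part of the measured g_lock.c): g_lock_writev_data()
 * only succeeds while the caller holds the exclusive lock, otherwise the
 * check above fails with NT_STATUS_NOT_LOCKED.  A hypothetical helper that
 * gathers a header and a payload into one record update:
 */
static NTSTATUS example_write_two_bufs(struct g_lock_ctx *ctx,
                                       TDB_DATA key,
                                       TDB_DATA hdr,
                                       TDB_DATA payload)
{
        /* Both buffers end up concatenated in the record's data portion. */
        TDB_DATA dbufs[2] = { hdr, payload };

        return g_lock_writev_data(ctx, key, dbufs, ARRAY_SIZE(dbufs));
}
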
    1545             : struct g_lock_locks_state {
    1546             :         int (*fn)(TDB_DATA key, void *private_data);
    1547             :         void *private_data;
    1548             : };
    1549             : 
    1550       18777 : static int g_lock_locks_fn(struct db_record *rec, void *priv)
    1551             : {
    1552           0 :         TDB_DATA key;
    1553       18777 :         struct g_lock_locks_state *state = (struct g_lock_locks_state *)priv;
    1554             : 
    1555       18777 :         key = dbwrap_record_get_key(rec);
    1556       18777 :         return state->fn(key, state->private_data);
    1557             : }
    1558             : 
    1559        6333 : int g_lock_locks(struct g_lock_ctx *ctx,
    1560             :                  int (*fn)(TDB_DATA key, void *private_data),
    1561             :                  void *private_data)
    1562             : {
    1563           0 :         struct g_lock_locks_state state;
    1564           0 :         NTSTATUS status;
    1565           0 :         int count;
    1566             : 
    1567        6333 :         SMB_ASSERT(!ctx->busy);
    1568             : 
    1569        6333 :         state.fn = fn;
    1570        6333 :         state.private_data = private_data;
    1571             : 
    1572        6333 :         status = dbwrap_traverse_read(ctx->db, g_lock_locks_fn, &state, &count);
    1573        6333 :         if (!NT_STATUS_IS_OK(status)) {
    1574           0 :                 return -1;
    1575             :         }
    1576        6333 :         return count;
    1577             : }
    1578             : 
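/*
 * Editor's sketch (not part of the measured g_lock.c): a traversal callback
 * for g_lock_locks().  Returning 0 keeps the underlying traverse going; a
 * non-zero return is assumed to stop it, following the tdb/dbwrap traverse
 * convention.
 */
static int example_count_keys_fn(TDB_DATA key, void *private_data)
{
        size_t *count = private_data;

        (*count)++;
        return 0;               /* keep traversing */
}

/* Usage: size_t n = 0; g_lock_locks(ctx, example_count_keys_fn, &n); */
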
    1579             : struct g_lock_dump_state {
    1580             :         TALLOC_CTX *mem_ctx;
    1581             :         TDB_DATA key;
    1582             :         void (*fn)(struct server_id exclusive,
    1583             :                    size_t num_shared,
    1584             :                    const struct server_id *shared,
    1585             :                    const uint8_t *data,
    1586             :                    size_t datalen,
    1587             :                    void *private_data);
    1588             :         void *private_data;
    1589             :         NTSTATUS status;
    1590             :         enum dbwrap_req_state req_state;
    1591             : };
    1592             : 
    1593      262826 : static void g_lock_dump_fn(TDB_DATA key, TDB_DATA data,
    1594             :                            void *private_data)
    1595             : {
    1596      262826 :         struct g_lock_dump_state *state = private_data;
    1597      262826 :         struct g_lock lck = (struct g_lock) { .exclusive.pid = 0 };
    1598      262826 :         struct server_id *shared = NULL;
    1599        1377 :         size_t i;
    1600        1377 :         bool ok;
    1601             : 
    1602      262826 :         ok = g_lock_parse(data.dptr, data.dsize, &lck);
    1603      262826 :         if (!ok) {
    1604           0 :                 DBG_DEBUG("g_lock_parse failed for %s\n",
    1605             :                           tdb_data_dbg(state->key));
    1606           0 :                 state->status = NT_STATUS_INTERNAL_DB_CORRUPTION;
    1607           0 :                 return;
    1608             :         }
    1609             : 
    1610      262826 :         if (lck.num_shared > 0) {
    1611           7 :                 shared = talloc_array(
    1612             :                         state->mem_ctx, struct server_id, lck.num_shared);
    1613           7 :                 if (shared == NULL) {
    1614           0 :                         DBG_DEBUG("talloc failed\n");
    1615           0 :                         state->status = NT_STATUS_NO_MEMORY;
    1616           0 :                         return;
    1617             :                 }
    1618             :         }
    1619             : 
    1620      262844 :         for (i=0; i<lck.num_shared; i++) {
    1621          18 :                 g_lock_get_shared(&lck, i, &shared[i]);
    1622             :         }
    1623             : 
    1624      262826 :         state->fn(lck.exclusive,
    1625             :                   lck.num_shared,
    1626             :                   shared,
    1627      262826 :                   lck.data,
    1628             :                   lck.datalen,
    1629             :                   state->private_data);
    1630             : 
    1631      262826 :         TALLOC_FREE(shared);
    1632             : 
    1633      262826 :         state->status = NT_STATUS_OK;
    1634             : }
    1635             : 
    1636     1087775 : NTSTATUS g_lock_dump(struct g_lock_ctx *ctx, TDB_DATA key,
    1637             :                      void (*fn)(struct server_id exclusive,
    1638             :                                 size_t num_shared,
    1639             :                                 const struct server_id *shared,
    1640             :                                 const uint8_t *data,
    1641             :                                 size_t datalen,
    1642             :                                 void *private_data),
    1643             :                      void *private_data)
    1644             : {
    1645     1087775 :         struct g_lock_dump_state state = {
    1646             :                 .mem_ctx = ctx, .key = key,
    1647             :                 .fn = fn, .private_data = private_data
    1648             :         };
    1649        1812 :         NTSTATUS status;
    1650             : 
    1651     1087775 :         SMB_ASSERT(!ctx->busy);
    1652             : 
    1653     1087775 :         status = dbwrap_parse_record(ctx->db, key, g_lock_dump_fn, &state);
    1654     1087775 :         if (!NT_STATUS_IS_OK(status)) {
    1655      824949 :                 DBG_DEBUG("dbwrap_parse_record returned %s\n",
    1656             :                           nt_errstr(status));
    1657      824949 :                 return status;
    1658             :         }
    1659      262826 :         if (!NT_STATUS_IS_OK(state.status)) {
    1660           0 :                 DBG_DEBUG("g_lock_dump_fn returned %s\n",
    1661             :                           nt_errstr(state.status));
    1662           0 :                 return state.status;
    1663             :         }
    1664      262826 :         return NT_STATUS_OK;
    1665             : }
    1666             : 
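/*
 * Editor's sketch (not part of the measured g_lock.c): a parser callback
 * for g_lock_dump(), which lets a caller inspect lock holders and payload
 * without taking the lock itself.  The data pointer is only valid for the
 * duration of the callback, so anything needed later has to be copied out.
 */
struct example_dump_private {
        TALLOC_CTX *mem_ctx;
        uint8_t *data;
        size_t datalen;
};

static void example_dump_fn(struct server_id exclusive,
                            size_t num_shared,
                            const struct server_id *shared,
                            const uint8_t *data,
                            size_t datalen,
                            void *private_data)
{
        struct example_dump_private *p = private_data;

        if (datalen == 0) {
                return;
        }
        /* Copy the payload; it points into a db record we don't own. */
        p->data = talloc_memdup(p->mem_ctx, data, datalen);
        p->datalen = (p->data != NULL) ? datalen : 0;
}
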
    1667             : static void g_lock_dump_done(struct tevent_req *subreq);
    1668             : 
    1669           0 : struct tevent_req *g_lock_dump_send(
    1670             :         TALLOC_CTX *mem_ctx,
    1671             :         struct tevent_context *ev,
    1672             :         struct g_lock_ctx *ctx,
    1673             :         TDB_DATA key,
    1674             :         void (*fn)(struct server_id exclusive,
    1675             :                    size_t num_shared,
    1676             :                    const struct server_id *shared,
    1677             :                    const uint8_t *data,
    1678             :                    size_t datalen,
    1679             :                    void *private_data),
    1680             :         void *private_data)
    1681             : {
    1682           0 :         struct tevent_req *req = NULL, *subreq = NULL;
    1683           0 :         struct g_lock_dump_state *state = NULL;
    1684             : 
    1685           0 :         SMB_ASSERT(!ctx->busy);
    1686             : 
    1687           0 :         req = tevent_req_create(mem_ctx, &state, struct g_lock_dump_state);
    1688           0 :         if (req == NULL) {
    1689           0 :                 return NULL;
    1690             :         }
    1691           0 :         state->mem_ctx = state;
    1692           0 :         state->key = key;
    1693           0 :         state->fn = fn;
    1694           0 :         state->private_data = private_data;
    1695             : 
    1696           0 :         SMB_ASSERT(!ctx->busy);
    1697             : 
    1698           0 :         subreq = dbwrap_parse_record_send(
    1699             :                 state,
    1700             :                 ev,
    1701             :                 ctx->db,
    1702             :                 key,
    1703             :                 g_lock_dump_fn,
    1704             :                 state,
    1705           0 :                 &state->req_state);
    1706           0 :         if (tevent_req_nomem(subreq, req)) {
    1707           0 :                 return tevent_req_post(req, ev);
    1708             :         }
    1709           0 :         tevent_req_set_callback(subreq, g_lock_dump_done, req);
    1710           0 :         return req;
    1711             : }
    1712             : 
    1713           0 : static void g_lock_dump_done(struct tevent_req *subreq)
    1714             : {
    1715           0 :         struct tevent_req *req = tevent_req_callback_data(
    1716             :                 subreq, struct tevent_req);
    1717           0 :         struct g_lock_dump_state *state = tevent_req_data(
    1718             :                 req, struct g_lock_dump_state);
    1719           0 :         NTSTATUS status;
    1720             : 
    1721           0 :         status = dbwrap_parse_record_recv(subreq);
    1722           0 :         TALLOC_FREE(subreq);
    1723           0 :         if (tevent_req_nterror(req, status) ||
    1724           0 :             tevent_req_nterror(req, state->status)) {
    1725           0 :                 return;
    1726             :         }
    1727           0 :         tevent_req_done(req);
    1728             : }
    1729             : 
    1730           0 : NTSTATUS g_lock_dump_recv(struct tevent_req *req)
    1731             : {
    1732           0 :         return tevent_req_simple_recv_ntstatus(req);
    1733             : }
    1734             : 
    1735      193231 : int g_lock_seqnum(struct g_lock_ctx *ctx)
    1736             : {
    1737      193231 :         return dbwrap_get_seqnum(ctx->db);
    1738             : }
    1739             : 
    1740             : struct g_lock_watch_data_state {
    1741             :         struct tevent_context *ev;
    1742             :         struct g_lock_ctx *ctx;
    1743             :         TDB_DATA key;
    1744             :         struct server_id blocker;
    1745             :         bool blockerdead;
    1746             :         uint64_t unique_lock_epoch;
    1747             :         uint64_t unique_data_epoch;
    1748             :         uint64_t watch_instance;
    1749             :         NTSTATUS status;
    1750             : };
    1751             : 
    1752             : static void g_lock_watch_data_done(struct tevent_req *subreq);
    1753             : 
    1754         653 : static void g_lock_watch_data_send_fn(
    1755             :         struct db_record *rec,
    1756             :         TDB_DATA value,
    1757             :         void *private_data)
    1758             : {
    1759         653 :         struct tevent_req *req = talloc_get_type_abort(
    1760             :                 private_data, struct tevent_req);
    1761         653 :         struct g_lock_watch_data_state *state = tevent_req_data(
    1762             :                 req, struct g_lock_watch_data_state);
    1763         653 :         struct tevent_req *subreq = NULL;
    1764           3 :         struct g_lock lck;
    1765           3 :         bool ok;
    1766             : 
    1767         653 :         ok = g_lock_parse(value.dptr, value.dsize, &lck);
    1768         653 :         if (!ok) {
    1769           0 :                 state->status = NT_STATUS_INTERNAL_DB_CORRUPTION;
    1770           0 :                 return;
    1771             :         }
    1772         653 :         state->unique_lock_epoch = lck.unique_lock_epoch;
    1773         653 :         state->unique_data_epoch = lck.unique_data_epoch;
    1774             : 
    1775         653 :         DBG_DEBUG("state->unique_data_epoch=%"PRIu64"\n", state->unique_data_epoch);
    1776             : 
    1777         653 :         subreq = dbwrap_watched_watch_send(
    1778             :                 state, state->ev, rec, 0, state->blocker);
    1779         653 :         if (subreq == NULL) {
    1780           0 :                 state->status = NT_STATUS_NO_MEMORY;
    1781           0 :                 return;
    1782             :         }
    1783         653 :         tevent_req_set_callback(subreq, g_lock_watch_data_done, req);
    1784             : 
    1785         653 :         state->status = NT_STATUS_EVENT_PENDING;
    1786             : }
    1787             : 
    1788         653 : struct tevent_req *g_lock_watch_data_send(
    1789             :         TALLOC_CTX *mem_ctx,
    1790             :         struct tevent_context *ev,
    1791             :         struct g_lock_ctx *ctx,
    1792             :         TDB_DATA key,
    1793             :         struct server_id blocker)
    1794             : {
    1795         653 :         struct tevent_req *req = NULL;
    1796         653 :         struct g_lock_watch_data_state *state = NULL;
    1797           3 :         NTSTATUS status;
    1798             : 
    1799         653 :         SMB_ASSERT(!ctx->busy);
    1800             : 
    1801         653 :         req = tevent_req_create(
    1802             :                 mem_ctx, &state, struct g_lock_watch_data_state);
    1803         653 :         if (req == NULL) {
    1804           0 :                 return NULL;
    1805             :         }
    1806         653 :         state->ev = ev;
    1807         653 :         state->ctx = ctx;
    1808         653 :         state->blocker = blocker;
    1809             : 
    1810         653 :         state->key = tdb_data_talloc_copy(state, key);
    1811         653 :         if (tevent_req_nomem(state->key.dptr, req)) {
    1812           0 :                 return tevent_req_post(req, ev);
    1813             :         }
    1814             : 
    1815         653 :         status = dbwrap_do_locked(
    1816             :                 ctx->db, key, g_lock_watch_data_send_fn, req);
    1817         653 :         if (tevent_req_nterror(req, status)) {
    1818           0 :                 DBG_DEBUG("dbwrap_do_locked returned %s\n", nt_errstr(status));
    1819           0 :                 return tevent_req_post(req, ev);
    1820             :         }
    1821             : 
    1822         653 :         if (NT_STATUS_EQUAL(state->status, NT_STATUS_EVENT_PENDING)) {
    1823         650 :                 return req;
    1824             :         }
    1825           0 :         if (tevent_req_nterror(req, state->status)) {
    1826           0 :                 return tevent_req_post(req, ev);
    1827             :         }
    1828           0 :         tevent_req_done(req);
    1829           0 :         return tevent_req_post(req, ev);
    1830             : }
    1831             : 
    1832        1003 : static void g_lock_watch_data_done_fn(
    1833             :         struct db_record *rec,
    1834             :         TDB_DATA value,
    1835             :         void *private_data)
    1836             : {
    1837        1003 :         struct tevent_req *req = talloc_get_type_abort(
    1838             :                 private_data, struct tevent_req);
    1839        1003 :         struct g_lock_watch_data_state *state = tevent_req_data(
    1840             :                 req, struct g_lock_watch_data_state);
    1841        1003 :         struct tevent_req *subreq = NULL;
    1842           3 :         struct g_lock lck;
    1843           3 :         bool ok;
    1844             : 
    1845        1003 :         ok = g_lock_parse(value.dptr, value.dsize, &lck);
    1846        1003 :         if (!ok) {
    1847           0 :                 dbwrap_watched_watch_remove_instance(rec, state->watch_instance);
    1848           0 :                 state->status = NT_STATUS_INTERNAL_DB_CORRUPTION;
    1849           0 :                 return;
    1850             :         }
    1851             : 
    1852        1003 :         if (lck.unique_data_epoch != state->unique_data_epoch) {
    1853          87 :                 dbwrap_watched_watch_remove_instance(rec, state->watch_instance);
    1854          87 :                 DBG_DEBUG("lck.unique_data_epoch=%"PRIu64", "
    1855             :                           "state->unique_data_epoch=%"PRIu64"\n",
    1856             :                           lck.unique_data_epoch,
    1857             :                           state->unique_data_epoch);
    1858          87 :                 state->status = NT_STATUS_OK;
    1859          87 :                 return;
    1860             :         }
    1861             : 
    1862             :         /*
     1863             :          * If the lock epoch changed, we'd better
     1864             :          * remove ourselves from the waiter list
     1865             :          * (most likely the first position)
     1866             :          * and re-add ourselves at the end of the list.
     1867             :          *
     1868             :          * This gives other lock waiters a chance
     1869             :          * to make progress.
     1870             :          *
     1871             :          * Otherwise we keep our waiter instance alive and
     1872             :          * keep waiting (most likely at the first position).
    1873             :          */
    1874         916 :         if (lck.unique_lock_epoch != state->unique_lock_epoch) {
    1875         830 :                 dbwrap_watched_watch_remove_instance(rec, state->watch_instance);
    1876         830 :                 state->watch_instance = dbwrap_watched_watch_add_instance(rec);
    1877         830 :                 state->unique_lock_epoch = lck.unique_lock_epoch;
    1878             :         }
    1879             : 
    1880         916 :         subreq = dbwrap_watched_watch_send(
    1881             :                 state, state->ev, rec, state->watch_instance, state->blocker);
    1882         916 :         if (subreq == NULL) {
    1883           0 :                 dbwrap_watched_watch_remove_instance(rec, state->watch_instance);
    1884           0 :                 state->status = NT_STATUS_NO_MEMORY;
    1885           0 :                 return;
    1886             :         }
    1887         916 :         tevent_req_set_callback(subreq, g_lock_watch_data_done, req);
    1888             : 
    1889         916 :         state->status = NT_STATUS_EVENT_PENDING;
    1890             : }
    1891             : 
    1892        1003 : static void g_lock_watch_data_done(struct tevent_req *subreq)
    1893             : {
    1894        1003 :         struct tevent_req *req = tevent_req_callback_data(
    1895             :                 subreq, struct tevent_req);
    1896        1003 :         struct g_lock_watch_data_state *state = tevent_req_data(
    1897             :                 req, struct g_lock_watch_data_state);
    1898           3 :         NTSTATUS status;
    1899        1003 :         uint64_t instance = 0;
    1900             : 
    1901        1003 :         status = dbwrap_watched_watch_recv(
    1902             :                 subreq, &instance, &state->blockerdead, &state->blocker);
    1903        1003 :         TALLOC_FREE(subreq);
    1904        1003 :         if (tevent_req_nterror(req, status)) {
    1905           0 :                 DBG_DEBUG("dbwrap_watched_watch_recv returned %s\n",
    1906             :                           nt_errstr(status));
    1907         916 :                 return;
    1908             :         }
    1909             : 
    1910        1003 :         state->watch_instance = instance;
    1911             : 
    1912        1003 :         status = dbwrap_do_locked(
    1913        1003 :                 state->ctx->db, state->key, g_lock_watch_data_done_fn, req);
    1914        1003 :         if (tevent_req_nterror(req, status)) {
    1915           0 :                 DBG_DEBUG("dbwrap_do_locked returned %s\n", nt_errstr(status));
    1916           0 :                 return;
    1917             :         }
    1918        1003 :         if (NT_STATUS_EQUAL(state->status, NT_STATUS_EVENT_PENDING)) {
    1919         914 :                 return;
    1920             :         }
    1921          87 :         if (tevent_req_nterror(req, state->status)) {
    1922           0 :                 return;
    1923             :         }
    1924          87 :         tevent_req_done(req);
    1925             : }
    1926             : 
    1927          86 : NTSTATUS g_lock_watch_data_recv(
    1928             :         struct tevent_req *req,
    1929             :         bool *blockerdead,
    1930             :         struct server_id *blocker)
    1931             : {
    1932          86 :         struct g_lock_watch_data_state *state = tevent_req_data(
    1933             :                 req, struct g_lock_watch_data_state);
    1934           0 :         NTSTATUS status;
    1935             : 
    1936          86 :         if (tevent_req_is_nterror(req, &status)) {
    1937           0 :                 return status;
    1938             :         }
    1939          86 :         if (blockerdead != NULL) {
    1940          86 :                 *blockerdead = state->blockerdead;
    1941             :         }
    1942          86 :         if (blocker != NULL) {
    1943          86 :                 *blocker = state->blocker;
    1944             :         }
    1945             : 
    1946          86 :         return NT_STATUS_OK;
    1947             : }
    1948             : 
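/*
 * Editor's sketch (not part of the measured g_lock.c): watching a record's
 * data from a tevent loop with g_lock_watch_data_send()/..._recv() above.
 * example_watch()/example_watch_done() are hypothetical; the zero-filled
 * server_id is just a placeholder blocker for this sketch.
 */
static void example_watch_done(struct tevent_req *subreq);

static void example_watch(struct tevent_context *ev,
                          struct g_lock_ctx *ctx,
                          TDB_DATA key)
{
        struct server_id blocker = { .pid = 0 };
        struct tevent_req *subreq = NULL;

        subreq = g_lock_watch_data_send(ev, ev, ctx, key, blocker);
        if (subreq == NULL) {
                return;         /* out of memory in real code */
        }
        tevent_req_set_callback(subreq, example_watch_done, NULL);
}

static void example_watch_done(struct tevent_req *subreq)
{
        bool blockerdead = false;
        struct server_id blocker = { .pid = 0 };
        NTSTATUS status;

        status = g_lock_watch_data_recv(subreq, &blockerdead, &blocker);
        TALLOC_FREE(subreq);
        DBG_DEBUG("watch_data returned %s\n", nt_errstr(status));
        /* On NT_STATUS_OK the data epoch changed: re-read the record now. */
}
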
    1949        2328 : static void g_lock_wake_watchers_fn(
    1950             :         struct db_record *rec,
    1951             :         TDB_DATA value,
    1952             :         void *private_data)
    1953             : {
    1954        2328 :         struct g_lock lck = { .exclusive.pid = 0 };
    1955          10 :         NTSTATUS status;
    1956          10 :         bool ok;
    1957             : 
    1958        2328 :         ok = g_lock_parse(value.dptr, value.dsize, &lck);
    1959        2328 :         if (!ok) {
    1960           0 :                 DBG_WARNING("g_lock_parse failed\n");
    1961           0 :                 return;
    1962             :         }
    1963             : 
    1964        2328 :         lck.unique_data_epoch = generate_unique_u64(lck.unique_data_epoch);
    1965             : 
    1966        2328 :         status = g_lock_store(rec, &lck, NULL, NULL, 0);
    1967        2328 :         if (!NT_STATUS_IS_OK(status)) {
    1968           0 :                 DBG_WARNING("g_lock_store failed: %s\n", nt_errstr(status));
    1969           0 :                 return;
    1970             :         }
    1971             : }
    1972             : 
    1973        2328 : void g_lock_wake_watchers(struct g_lock_ctx *ctx, TDB_DATA key)
    1974             : {
    1975          10 :         NTSTATUS status;
    1976             : 
    1977        2328 :         SMB_ASSERT(!ctx->busy);
    1978             : 
    1979        2328 :         status = dbwrap_do_locked(ctx->db, key, g_lock_wake_watchers_fn, NULL);
    1980        2328 :         if (!NT_STATUS_IS_OK(status)) {
    1981           0 :                 DBG_DEBUG("dbwrap_do_locked returned %s\n",
    1982             :                           nt_errstr(status));
    1983             :         }
    1984        2328 : }
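
/*
 * Editor's sketch (not part of the measured g_lock.c): since
 * g_lock_wake_watchers() bumps unique_data_epoch without touching the
 * payload, a hypothetical caller can use it to nudge watchers registered
 * via g_lock_watch_data_send() even though the stored data is unchanged.
 */
static void example_nudge_watchers(struct g_lock_ctx *ctx)
{
        /* Intended to wake pending g_lock_watch_data waiters on this key. */
        g_lock_wake_watchers(ctx, string_term_tdb_data("example_key"));
}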

Generated by: LCOV version 1.14